From 45178c87a3099a9fef8bae6f7249ca306cf89629 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 14 Sep 2022 15:44:24 -0500 Subject: develop Merge doxygen from 1.12 branch (#2095) --- CMakeLists.txt | 5 + CTestConfig.cmake | 2 +- config/cmake/UseJava.cmake | 1 + configure.ac | 6 +- doxygen/CMakeLists.txt | 5 +- doxygen/Doxyfile.in | 10 +- doxygen/aliases | 2 +- doxygen/dox/DDLBNF110.dox | 2 +- doxygen/dox/FileFormatSpec.dox | 23 + doxygen/dox/GettingStarted.dox | 101 +- doxygen/dox/IntroHDF5.dox | 627 + doxygen/dox/LearnBasics.dox | 183 + doxygen/dox/LearnBasics1.dox | 1023 + doxygen/dox/LearnBasics2.dox | 1159 + doxygen/dox/LearnBasics3.dox | 1015 + doxygen/dox/LearnHDFView.dox | 472 + doxygen/dox/ReferenceManual.dox | 59 +- doxygen/dox/Specifications.dox | 16 +- doxygen/dox/TechnicalNotes.dox | 16 +- doxygen/dox/UsersGuide.dox | 403 + doxygen/dox/ViewTools.dox | 1198 + doxygen/dox/ViewTools2.dox | 786 + doxygen/dox/ViewToolsJPSS.dox | 763 + doxygen/dox/high_level/extension.dox | 543 +- doxygen/dox/high_level/high_level.dox | 29 - doxygen/dox/rm-template.dox | 2 +- doxygen/examples/H5.format.1.0.html | 2 +- doxygen/examples/H5.format.1.1.html | 2 +- doxygen/examples/H5.format.2.0.html | 26718 ++++++++++--------- doxygen/examples/H5.format.html | 2 +- doxygen/examples/ThreadSafeLibrary.html | 10 +- doxygen/examples/core_menu.md | 69 + doxygen/examples/fortran_menu.md | 73 + doxygen/examples/high_level_menu.md | 30 + doxygen/examples/java_menu.md | 84 + doxygen/hdf5doxy_layout.xml | 3 +- doxygen/img/DataGroup.png | Bin 0 -> 41665 bytes doxygen/img/Dmodel_fig1.gif | Bin 0 -> 13259 bytes doxygen/img/Dmodel_fig10.gif | Bin 0 -> 11552 bytes doxygen/img/Dmodel_fig11_b.gif | Bin 0 -> 13924 bytes doxygen/img/Dmodel_fig12_a.gif | Bin 0 -> 3182 bytes doxygen/img/Dmodel_fig12_b.gif | Bin 0 -> 4028 bytes doxygen/img/Dmodel_fig14_a.gif | Bin 0 -> 5367 bytes doxygen/img/Dmodel_fig14_b.gif | Bin 0 -> 6432 bytes 
doxygen/img/Dmodel_fig14_c.gif | Bin 0 -> 7397 bytes doxygen/img/Dmodel_fig14_d.gif | Bin 0 -> 9898 bytes doxygen/img/Dmodel_fig2.gif | Bin 0 -> 12024 bytes doxygen/img/Dmodel_fig3_a.gif | Bin 0 -> 7427 bytes doxygen/img/Dmodel_fig3_c.gif | Bin 0 -> 6800 bytes doxygen/img/Dmodel_fig4_a.gif | Bin 0 -> 4239 bytes doxygen/img/Dmodel_fig4_b.gif | Bin 0 -> 24587 bytes doxygen/img/Dmodel_fig5.gif | Bin 0 -> 7431 bytes doxygen/img/Dmodel_fig6.gif | Bin 0 -> 6497 bytes doxygen/img/Dmodel_fig7_b.gif | Bin 0 -> 26637 bytes doxygen/img/Dmodel_fig8.gif | Bin 0 -> 12217 bytes doxygen/img/Dmodel_fig9.gif | Bin 0 -> 14812 bytes doxygen/img/Dsets_NbitFloating1.gif | Bin 0 -> 6815 bytes doxygen/img/Dsets_NbitFloating2.gif | Bin 0 -> 9335 bytes doxygen/img/Dsets_NbitInteger1.gif | Bin 0 -> 6489 bytes doxygen/img/Dsets_NbitInteger2.gif | Bin 0 -> 10471 bytes doxygen/img/Dsets_fig1.gif | Bin 0 -> 10803 bytes doxygen/img/Dsets_fig10.gif | Bin 0 -> 6538 bytes doxygen/img/Dsets_fig11.gif | Bin 0 -> 21211 bytes doxygen/img/Dsets_fig12.gif | Bin 0 -> 36869 bytes doxygen/img/Dsets_fig2.gif | Bin 0 -> 18083 bytes doxygen/img/Dsets_fig3.gif | Bin 0 -> 27621 bytes doxygen/img/Dsets_fig4.gif | Bin 0 -> 41416 bytes doxygen/img/Dsets_fig5.gif | Bin 0 -> 15893 bytes doxygen/img/Dsets_fig6.gif | Bin 0 -> 2509 bytes doxygen/img/Dsets_fig7.gif | Bin 0 -> 2556 bytes doxygen/img/Dsets_fig8.gif | Bin 0 -> 2273 bytes doxygen/img/Dsets_fig9.gif | Bin 0 -> 4368 bytes doxygen/img/Dspace_CvsF1.gif | Bin 0 -> 8623 bytes doxygen/img/Dspace_CvsF2.gif | Bin 0 -> 8623 bytes doxygen/img/Dspace_CvsF3.gif | Bin 0 -> 8909 bytes doxygen/img/Dspace_CvsF4.gif | Bin 0 -> 8470 bytes doxygen/img/Dspace_combine.gif | Bin 0 -> 5101 bytes doxygen/img/Dspace_complex.gif | Bin 0 -> 6715 bytes doxygen/img/Dspace_features.gif | Bin 0 -> 89809 bytes doxygen/img/Dspace_features_cmpd.gif | Bin 0 -> 31274 bytes doxygen/img/Dspace_move.gif | Bin 0 -> 13255 bytes doxygen/img/Dspace_point.gif | Bin 0 -> 6697 bytes 
doxygen/img/Dspace_read.gif | Bin 0 -> 14238 bytes doxygen/img/Dspace_select.gif | Bin 0 -> 29452 bytes doxygen/img/Dspace_separate.gif | Bin 0 -> 9911 bytes doxygen/img/Dspace_simple.gif | Bin 0 -> 9709 bytes doxygen/img/Dspace_subset.gif | Bin 0 -> 4790 bytes doxygen/img/Dspace_three_datasets.gif | Bin 0 -> 10912 bytes doxygen/img/Dspace_transfer.gif | Bin 0 -> 15096 bytes doxygen/img/Dspace_write1to2.gif | Bin 0 -> 38748 bytes doxygen/img/Dtypes_fig1.gif | Bin 0 -> 8857 bytes doxygen/img/Dtypes_fig10.gif | Bin 0 -> 41804 bytes doxygen/img/Dtypes_fig11.gif | Bin 0 -> 79789 bytes doxygen/img/Dtypes_fig12.gif | Bin 0 -> 15692 bytes doxygen/img/Dtypes_fig13a.gif | Bin 0 -> 3350 bytes doxygen/img/Dtypes_fig13b.gif | Bin 0 -> 3571 bytes doxygen/img/Dtypes_fig13c.gif | Bin 0 -> 2629 bytes doxygen/img/Dtypes_fig13d.gif | Bin 0 -> 2769 bytes doxygen/img/Dtypes_fig14.gif | Bin 0 -> 50174 bytes doxygen/img/Dtypes_fig15.gif | Bin 0 -> 30871 bytes doxygen/img/Dtypes_fig16.gif | Bin 0 -> 5420 bytes doxygen/img/Dtypes_fig16a.gif | Bin 0 -> 10896 bytes doxygen/img/Dtypes_fig16b.gif | Bin 0 -> 6908 bytes doxygen/img/Dtypes_fig16c.gif | Bin 0 -> 6847 bytes doxygen/img/Dtypes_fig16d.gif | Bin 0 -> 9850 bytes doxygen/img/Dtypes_fig17a.gif | Bin 0 -> 13623 bytes doxygen/img/Dtypes_fig17b.gif | Bin 0 -> 30154 bytes doxygen/img/Dtypes_fig18.gif | Bin 0 -> 9037 bytes doxygen/img/Dtypes_fig19.gif | Bin 0 -> 12014 bytes doxygen/img/Dtypes_fig2.gif | Bin 0 -> 6099 bytes doxygen/img/Dtypes_fig20a.gif | Bin 0 -> 31836 bytes doxygen/img/Dtypes_fig20b.gif | Bin 0 -> 17044 bytes doxygen/img/Dtypes_fig20c.gif | Bin 0 -> 24983 bytes doxygen/img/Dtypes_fig20d.gif | Bin 0 -> 14435 bytes doxygen/img/Dtypes_fig21.gif | Bin 0 -> 56286 bytes doxygen/img/Dtypes_fig22.gif | Bin 0 -> 40000 bytes doxygen/img/Dtypes_fig23.gif | Bin 0 -> 61311 bytes doxygen/img/Dtypes_fig24.gif | Bin 0 -> 30529 bytes doxygen/img/Dtypes_fig25a.gif | Bin 0 -> 17268 bytes doxygen/img/Dtypes_fig25c.gif | Bin 0 -> 17238 bytes 
doxygen/img/Dtypes_fig26.gif | Bin 0 -> 14866 bytes doxygen/img/Dtypes_fig27.gif | Bin 0 -> 57099 bytes doxygen/img/Dtypes_fig28.gif | Bin 0 -> 49961 bytes doxygen/img/Dtypes_fig3.gif | Bin 0 -> 5654 bytes doxygen/img/Dtypes_fig4.gif | Bin 0 -> 14387 bytes doxygen/img/Dtypes_fig5.gif | Bin 0 -> 16959 bytes doxygen/img/Dtypes_fig6.gif | Bin 0 -> 52742 bytes doxygen/img/Dtypes_fig7.gif | Bin 0 -> 55938 bytes doxygen/img/Dtypes_fig8.gif | Bin 0 -> 20671 bytes doxygen/img/Dtypes_fig9.gif | Bin 0 -> 22036 bytes doxygen/img/Files_fig3.gif | Bin 0 -> 44382 bytes doxygen/img/Files_fig4.gif | Bin 0 -> 38862 bytes doxygen/img/Groups_fig1.gif | Bin 0 -> 5404 bytes doxygen/img/Groups_fig10_a.gif | Bin 0 -> 7745 bytes doxygen/img/Groups_fig10_b.gif | Bin 0 -> 6372 bytes doxygen/img/Groups_fig10_c.gif | Bin 0 -> 8308 bytes doxygen/img/Groups_fig10_d.gif | Bin 0 -> 12963 bytes doxygen/img/Groups_fig11_a.gif | Bin 0 -> 7349 bytes doxygen/img/Groups_fig11_b.gif | Bin 0 -> 7912 bytes doxygen/img/Groups_fig11_c.gif | Bin 0 -> 8589 bytes doxygen/img/Groups_fig11_d.gif | Bin 0 -> 9552 bytes doxygen/img/Groups_fig2.gif | Bin 0 -> 6187 bytes doxygen/img/Groups_fig3.gif | Bin 0 -> 5045 bytes doxygen/img/Groups_fig4.gif | Bin 0 -> 12187 bytes doxygen/img/Groups_fig5.gif | Bin 0 -> 10459 bytes doxygen/img/Groups_fig6.gif | Bin 0 -> 13979 bytes doxygen/img/Groups_fig9_a.gif | Bin 0 -> 6313 bytes doxygen/img/Groups_fig9_aa.gif | Bin 0 -> 7923 bytes doxygen/img/Groups_fig9_b.gif | Bin 0 -> 7352 bytes doxygen/img/Groups_fig9_bb.gif | Bin 0 -> 7336 bytes doxygen/img/LBDsetSubRWProg.png | Bin 0 -> 18297 bytes doxygen/img/Pmodel_fig2.gif | Bin 0 -> 4411 bytes doxygen/img/Pmodel_fig3.gif | Bin 0 -> 39263 bytes doxygen/img/Pmodel_fig5_a.gif | Bin 0 -> 17234 bytes doxygen/img/Pmodel_fig5_b.gif | Bin 0 -> 20671 bytes doxygen/img/Pmodel_fig5_c.gif | Bin 0 -> 23897 bytes doxygen/img/Pmodel_fig5_d.gif | Bin 0 -> 23575 bytes doxygen/img/Pmodel_fig5_e.gif | Bin 0 -> 4304 bytes doxygen/img/Pmodel_fig6.gif | 
Bin 0 -> 11996 bytes doxygen/img/PropListClassInheritance.gif | Bin 0 -> 17349 bytes doxygen/img/PropListEcosystem.gif | Bin 0 -> 3720 bytes doxygen/img/Shared_Attribute.jpg | Bin 0 -> 37209 bytes doxygen/img/StormDataset.png | Bin 0 -> 21441 bytes doxygen/img/UML_Attribute.jpg | Bin 0 -> 36134 bytes doxygen/img/UML_FileAndProps.gif | Bin 0 -> 6161 bytes doxygen/img/VFL_Drivers.gif | Bin 0 -> 17638 bytes doxygen/img/cmpnddtype.png | Bin 0 -> 19354 bytes doxygen/img/crtatt.png | Bin 0 -> 30414 bytes doxygen/img/crtdset.png | Bin 0 -> 18200 bytes doxygen/img/crtf-pic.png | Bin 0 -> 14322 bytes doxygen/img/crtgrp.png | Bin 0 -> 15588 bytes doxygen/img/dataset.png | Bin 0 -> 25110 bytes doxygen/img/datasetwdata.png | Bin 0 -> 71012 bytes doxygen/img/dataspace.png | Bin 0 -> 15883 bytes doxygen/img/dataspace1.png | Bin 0 -> 11510 bytes doxygen/img/datatype.png | Bin 0 -> 11848 bytes doxygen/img/dtypes_fig25b.gif | Bin 0 -> 6634 bytes doxygen/img/fileobj.png | Bin 0 -> 108220 bytes doxygen/img/group.png | Bin 0 -> 122668 bytes doxygen/img/hdfview-anthrstrm-img.png | Bin 0 -> 68911 bytes doxygen/img/hdfview-anthrstrm-sprdsht.png | Bin 0 -> 63861 bytes doxygen/img/hdfview-anthrstrm.png | Bin 0 -> 46472 bytes doxygen/img/hdfview-imgicon.png | Bin 0 -> 81394 bytes doxygen/img/hdfview-imgprop.png | Bin 0 -> 83103 bytes doxygen/img/hdfview-imgsubset.png | Bin 0 -> 82068 bytes doxygen/img/hdfview-newcmpd.png | Bin 0 -> 100495 bytes doxygen/img/hdfview-newimgsubset.png | Bin 0 -> 85051 bytes doxygen/img/hdfview-prop.png | Bin 0 -> 114672 bytes doxygen/img/hdfview-qf.png | Bin 0 -> 170686 bytes doxygen/img/hdfview-regref.png | Bin 0 -> 231317 bytes doxygen/img/hdfview-regref1.png | Bin 0 -> 134813 bytes doxygen/img/hdfview-regref2.png | Bin 0 -> 195189 bytes doxygen/img/hdfview-regrefval.png | Bin 0 -> 56209 bytes doxygen/img/hdfview-table.png | Bin 0 -> 61329 bytes doxygen/img/hdfview-tree.png | Bin 0 -> 38283 bytes doxygen/img/imgLBDsetCreate.gif | Bin 0 -> 954 bytes 
doxygen/img/imgLBDsetSubRW11.png | Bin 0 -> 23645 bytes doxygen/img/imgLBDsetSubRW12.png | Bin 0 -> 25324 bytes doxygen/img/imgLBDsetSubRW31.png | Bin 0 -> 17212 bytes doxygen/img/imgLBDsetSubRW32.png | Bin 0 -> 18201 bytes doxygen/img/imgLBDsetSubRW33.png | Bin 0 -> 17781 bytes doxygen/img/imgLBFile.gif | Bin 0 -> 635 bytes doxygen/img/imggrpcreate.gif | Bin 0 -> 928 bytes doxygen/img/imggrpdsets.gif | Bin 0 -> 1812 bytes doxygen/img/imggrps.gif | Bin 0 -> 1644 bytes doxygen/img/newgroupimage.png | Bin 0 -> 44700 bytes doxygen/img/noattrs.png | Bin 0 -> 43386 bytes doxygen/img/properties.png | Bin 0 -> 50472 bytes doxygen/img/scarletletter.png | Bin 0 -> 51013 bytes doxygen/img/showasimage.png | Bin 0 -> 66016 bytes doxygen/img/storm.png | Bin 0 -> 41590 bytes doxygen/img/tutr-lochk.png | Bin 0 -> 9419 bytes doxygen/img/tutr-lochks.png | Bin 0 -> 9652 bytes doxygen/img/tutr-locons.png | Bin 0 -> 8133 bytes doxygen/img/vol_architecture.png | Bin 0 -> 46329 bytes fortran/src/H5Aff.F90 | 6 +- fortran/src/H5Dff.F90 | 52 +- fortran/src/H5Fff.F90 | 10 +- fortran/src/H5Lff.F90 | 15 +- fortran/src/H5Off.F90 | 1 + fortran/src/H5Pff.F90 | 28 +- fortran/src/H5Rff.F90 | 9 +- fortran/src/H5Sff.F90 | 4 +- fortran/src/H5Tff.F90 | 4 +- fortran/src/H5VLff.F90 | 2 +- fortran/src/H5Zff.F90 | 7 + fortran/src/H5_buildiface.F90 | 4 +- fortran/src/H5_ff.F90 | 8 +- hl/fortran/src/H5DSff.F90 | 25 +- hl/fortran/src/H5IMff.F90 | 30 +- hl/fortran/src/H5LTff.F90 | 30 +- hl/fortran/src/H5TBff.F90 | 36 +- hl/src/H5DOpublic.h | 6 +- hl/src/H5DSpublic.h | 28 +- hl/src/H5IMpublic.h | 8 +- hl/src/H5LDpublic.h | 6 +- hl/src/H5LTpublic.h | 128 +- hl/src/H5PTpublic.h | 8 +- hl/src/H5TBpublic.h | 8 +- java/examples/groups/H5Ex_G_Visit.java | 2 +- java/src/Makefile.am | 3 +- java/src/hdf/hdf5lib/CMakeLists.txt | 1 - java/src/hdf/hdf5lib/H5.java | 3549 ++- java/src/hdf/hdf5lib/HDF5Constants.java | 13 +- java/src/hdf/hdf5lib/HDF5GroupInfo.java | 182 - java/src/hdf/hdf5lib/HDFArray.java | 7 +- 
java/src/hdf/hdf5lib/HDFNativeData.java | 7 +- java/src/hdf/hdf5lib/callbacks/Callbacks.java | 6 +- java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java | 2 + java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java | 2 + java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java | 2 + java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java | 2 + java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java | 2 + java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java | 2 + .../hdf5lib/callbacks/H5P_cls_close_func_cb.java | 2 + .../hdf5lib/callbacks/H5P_cls_copy_func_cb.java | 2 + .../hdf5lib/callbacks/H5P_cls_create_func_cb.java | 2 + java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java | 2 + .../hdf5lib/callbacks/H5P_prp_close_func_cb.java | 2 + .../hdf5lib/callbacks/H5P_prp_compare_func_cb.java | 2 + .../hdf5lib/callbacks/H5P_prp_copy_func_cb.java | 2 + .../hdf5lib/callbacks/H5P_prp_create_func_cb.java | 2 + .../hdf5lib/callbacks/H5P_prp_delete_func_cb.java | 2 + .../hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java | 2 + .../hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java | 2 + java/src/hdf/hdf5lib/callbacks/package-info.java | 1 + .../hdf5lib/exceptions/HDF5AttributeException.java | 6 +- .../hdf/hdf5lib/exceptions/HDF5BtreeException.java | 6 +- .../exceptions/HDF5DataFiltersException.java | 6 +- .../exceptions/HDF5DataStorageException.java | 6 +- .../exceptions/HDF5DatasetInterfaceException.java | 6 +- .../HDF5DataspaceInterfaceException.java | 6 +- .../exceptions/HDF5DatatypeInterfaceException.java | 6 +- java/src/hdf/hdf5lib/exceptions/HDF5Exception.java | 18 +- .../exceptions/HDF5ExternalFileListException.java | 6 +- .../exceptions/HDF5FileInterfaceException.java | 6 +- .../exceptions/HDF5FunctionArgumentException.java | 6 +- .../exceptions/HDF5FunctionEntryExitException.java | 6 +- .../hdf/hdf5lib/exceptions/HDF5HeapException.java | 6 +- .../hdf/hdf5lib/exceptions/HDF5IdException.java | 6 +- .../exceptions/HDF5InternalErrorException.java | 6 +- .../hdf/hdf5lib/exceptions/HDF5JavaException.java | 8 +- 
.../hdf5lib/exceptions/HDF5LibraryException.java | 50 +- .../exceptions/HDF5LowLevelIOException.java | 6 +- .../exceptions/HDF5MetaDataCacheException.java | 6 +- .../exceptions/HDF5ObjectHeaderException.java | 6 +- .../HDF5PropertyListInterfaceException.java | 6 +- .../hdf5lib/exceptions/HDF5ReferenceException.java | 6 +- .../HDF5ResourceUnavailableException.java | 6 +- .../exceptions/HDF5SymbolTableException.java | 6 +- java/src/hdf/hdf5lib/exceptions/package-info.java | 1 + java/src/hdf/hdf5lib/package-info.java | 114 +- java/src/hdf/overview.html | 10 +- java/src/jni/exceptionImp.c | 12 +- java/src/jni/exceptionImp.h | 2 +- java/src/jni/h5Constants.c | 5 + java/test/TestH5.java | 2 +- release_docs/RELEASE.txt | 6 +- src/H5ACpublic.h | 2 +- src/H5Amodule.h | 346 +- src/H5Dmodule.h | 2956 +- src/H5Dpublic.h | 2 +- src/H5ESmodule.h | 88 +- src/H5Emodule.h | 514 +- src/H5Epublic.h | 8 +- src/H5Fmodule.h | 1443 +- src/H5Gmodule.h | 924 +- src/H5Gpublic.h | 10 +- src/H5Imodule.h | 7 +- src/H5Lmodule.h | 7 +- src/H5Mmodule.h | 16 +- src/H5Omodule.h | 7 +- src/H5Opublic.h | 6 +- src/H5PLmodule.h | 9 +- src/H5Pmodule.h | 960 +- src/H5Ppublic.h | 80 +- src/H5Rmodule.h | 27 +- src/H5Smodule.h | 1492 +- src/H5Tmodule.h | 3832 ++- src/H5VLmodule.h | 92 +- src/H5Zmodule.h | 7 +- src/H5module.h | 1404 +- 322 files changed, 38941 insertions(+), 15364 deletions(-) create mode 100644 doxygen/dox/FileFormatSpec.dox create mode 100644 doxygen/dox/IntroHDF5.dox create mode 100644 doxygen/dox/LearnBasics.dox create mode 100644 doxygen/dox/LearnBasics1.dox create mode 100644 doxygen/dox/LearnBasics2.dox create mode 100644 doxygen/dox/LearnBasics3.dox create mode 100644 doxygen/dox/LearnHDFView.dox create mode 100644 doxygen/dox/UsersGuide.dox create mode 100644 doxygen/dox/ViewTools.dox create mode 100644 doxygen/dox/ViewTools2.dox create mode 100644 doxygen/dox/ViewToolsJPSS.dox delete mode 100644 doxygen/dox/high_level/high_level.dox create mode 100644 doxygen/examples/core_menu.md 
create mode 100644 doxygen/examples/fortran_menu.md create mode 100644 doxygen/examples/high_level_menu.md create mode 100644 doxygen/examples/java_menu.md create mode 100644 doxygen/img/DataGroup.png create mode 100644 doxygen/img/Dmodel_fig1.gif create mode 100644 doxygen/img/Dmodel_fig10.gif create mode 100644 doxygen/img/Dmodel_fig11_b.gif create mode 100644 doxygen/img/Dmodel_fig12_a.gif create mode 100644 doxygen/img/Dmodel_fig12_b.gif create mode 100644 doxygen/img/Dmodel_fig14_a.gif create mode 100644 doxygen/img/Dmodel_fig14_b.gif create mode 100644 doxygen/img/Dmodel_fig14_c.gif create mode 100644 doxygen/img/Dmodel_fig14_d.gif create mode 100644 doxygen/img/Dmodel_fig2.gif create mode 100644 doxygen/img/Dmodel_fig3_a.gif create mode 100644 doxygen/img/Dmodel_fig3_c.gif create mode 100644 doxygen/img/Dmodel_fig4_a.gif create mode 100644 doxygen/img/Dmodel_fig4_b.gif create mode 100644 doxygen/img/Dmodel_fig5.gif create mode 100644 doxygen/img/Dmodel_fig6.gif create mode 100644 doxygen/img/Dmodel_fig7_b.gif create mode 100644 doxygen/img/Dmodel_fig8.gif create mode 100644 doxygen/img/Dmodel_fig9.gif create mode 100644 doxygen/img/Dsets_NbitFloating1.gif create mode 100644 doxygen/img/Dsets_NbitFloating2.gif create mode 100644 doxygen/img/Dsets_NbitInteger1.gif create mode 100644 doxygen/img/Dsets_NbitInteger2.gif create mode 100644 doxygen/img/Dsets_fig1.gif create mode 100644 doxygen/img/Dsets_fig10.gif create mode 100644 doxygen/img/Dsets_fig11.gif create mode 100644 doxygen/img/Dsets_fig12.gif create mode 100644 doxygen/img/Dsets_fig2.gif create mode 100644 doxygen/img/Dsets_fig3.gif create mode 100644 doxygen/img/Dsets_fig4.gif create mode 100644 doxygen/img/Dsets_fig5.gif create mode 100644 doxygen/img/Dsets_fig6.gif create mode 100644 doxygen/img/Dsets_fig7.gif create mode 100644 doxygen/img/Dsets_fig8.gif create mode 100644 doxygen/img/Dsets_fig9.gif create mode 100644 doxygen/img/Dspace_CvsF1.gif create mode 100644 doxygen/img/Dspace_CvsF2.gif 
create mode 100644 doxygen/img/Dspace_CvsF3.gif create mode 100644 doxygen/img/Dspace_CvsF4.gif create mode 100644 doxygen/img/Dspace_combine.gif create mode 100644 doxygen/img/Dspace_complex.gif create mode 100644 doxygen/img/Dspace_features.gif create mode 100644 doxygen/img/Dspace_features_cmpd.gif create mode 100644 doxygen/img/Dspace_move.gif create mode 100644 doxygen/img/Dspace_point.gif create mode 100644 doxygen/img/Dspace_read.gif create mode 100644 doxygen/img/Dspace_select.gif create mode 100644 doxygen/img/Dspace_separate.gif create mode 100644 doxygen/img/Dspace_simple.gif create mode 100644 doxygen/img/Dspace_subset.gif create mode 100644 doxygen/img/Dspace_three_datasets.gif create mode 100644 doxygen/img/Dspace_transfer.gif create mode 100644 doxygen/img/Dspace_write1to2.gif create mode 100644 doxygen/img/Dtypes_fig1.gif create mode 100644 doxygen/img/Dtypes_fig10.gif create mode 100644 doxygen/img/Dtypes_fig11.gif create mode 100644 doxygen/img/Dtypes_fig12.gif create mode 100644 doxygen/img/Dtypes_fig13a.gif create mode 100644 doxygen/img/Dtypes_fig13b.gif create mode 100644 doxygen/img/Dtypes_fig13c.gif create mode 100644 doxygen/img/Dtypes_fig13d.gif create mode 100644 doxygen/img/Dtypes_fig14.gif create mode 100644 doxygen/img/Dtypes_fig15.gif create mode 100644 doxygen/img/Dtypes_fig16.gif create mode 100644 doxygen/img/Dtypes_fig16a.gif create mode 100644 doxygen/img/Dtypes_fig16b.gif create mode 100644 doxygen/img/Dtypes_fig16c.gif create mode 100644 doxygen/img/Dtypes_fig16d.gif create mode 100644 doxygen/img/Dtypes_fig17a.gif create mode 100644 doxygen/img/Dtypes_fig17b.gif create mode 100644 doxygen/img/Dtypes_fig18.gif create mode 100644 doxygen/img/Dtypes_fig19.gif create mode 100644 doxygen/img/Dtypes_fig2.gif create mode 100644 doxygen/img/Dtypes_fig20a.gif create mode 100644 doxygen/img/Dtypes_fig20b.gif create mode 100644 doxygen/img/Dtypes_fig20c.gif create mode 100644 doxygen/img/Dtypes_fig20d.gif create mode 100644 
doxygen/img/Dtypes_fig21.gif create mode 100644 doxygen/img/Dtypes_fig22.gif create mode 100644 doxygen/img/Dtypes_fig23.gif create mode 100644 doxygen/img/Dtypes_fig24.gif create mode 100644 doxygen/img/Dtypes_fig25a.gif create mode 100644 doxygen/img/Dtypes_fig25c.gif create mode 100644 doxygen/img/Dtypes_fig26.gif create mode 100644 doxygen/img/Dtypes_fig27.gif create mode 100644 doxygen/img/Dtypes_fig28.gif create mode 100644 doxygen/img/Dtypes_fig3.gif create mode 100644 doxygen/img/Dtypes_fig4.gif create mode 100644 doxygen/img/Dtypes_fig5.gif create mode 100644 doxygen/img/Dtypes_fig6.gif create mode 100644 doxygen/img/Dtypes_fig7.gif create mode 100644 doxygen/img/Dtypes_fig8.gif create mode 100644 doxygen/img/Dtypes_fig9.gif create mode 100644 doxygen/img/Files_fig3.gif create mode 100644 doxygen/img/Files_fig4.gif create mode 100644 doxygen/img/Groups_fig1.gif create mode 100644 doxygen/img/Groups_fig10_a.gif create mode 100644 doxygen/img/Groups_fig10_b.gif create mode 100644 doxygen/img/Groups_fig10_c.gif create mode 100644 doxygen/img/Groups_fig10_d.gif create mode 100644 doxygen/img/Groups_fig11_a.gif create mode 100644 doxygen/img/Groups_fig11_b.gif create mode 100644 doxygen/img/Groups_fig11_c.gif create mode 100644 doxygen/img/Groups_fig11_d.gif create mode 100644 doxygen/img/Groups_fig2.gif create mode 100644 doxygen/img/Groups_fig3.gif create mode 100644 doxygen/img/Groups_fig4.gif create mode 100644 doxygen/img/Groups_fig5.gif create mode 100644 doxygen/img/Groups_fig6.gif create mode 100644 doxygen/img/Groups_fig9_a.gif create mode 100644 doxygen/img/Groups_fig9_aa.gif create mode 100644 doxygen/img/Groups_fig9_b.gif create mode 100644 doxygen/img/Groups_fig9_bb.gif create mode 100644 doxygen/img/LBDsetSubRWProg.png create mode 100644 doxygen/img/Pmodel_fig2.gif create mode 100644 doxygen/img/Pmodel_fig3.gif create mode 100644 doxygen/img/Pmodel_fig5_a.gif create mode 100644 doxygen/img/Pmodel_fig5_b.gif create mode 100644 
doxygen/img/Pmodel_fig5_c.gif create mode 100644 doxygen/img/Pmodel_fig5_d.gif create mode 100644 doxygen/img/Pmodel_fig5_e.gif create mode 100644 doxygen/img/Pmodel_fig6.gif create mode 100644 doxygen/img/PropListClassInheritance.gif create mode 100644 doxygen/img/PropListEcosystem.gif create mode 100644 doxygen/img/Shared_Attribute.jpg create mode 100644 doxygen/img/StormDataset.png create mode 100644 doxygen/img/UML_Attribute.jpg create mode 100644 doxygen/img/UML_FileAndProps.gif create mode 100644 doxygen/img/VFL_Drivers.gif create mode 100644 doxygen/img/cmpnddtype.png create mode 100644 doxygen/img/crtatt.png create mode 100644 doxygen/img/crtdset.png create mode 100644 doxygen/img/crtf-pic.png create mode 100644 doxygen/img/crtgrp.png create mode 100644 doxygen/img/dataset.png create mode 100644 doxygen/img/datasetwdata.png create mode 100644 doxygen/img/dataspace.png create mode 100644 doxygen/img/dataspace1.png create mode 100644 doxygen/img/datatype.png create mode 100644 doxygen/img/dtypes_fig25b.gif create mode 100644 doxygen/img/fileobj.png create mode 100644 doxygen/img/group.png create mode 100644 doxygen/img/hdfview-anthrstrm-img.png create mode 100644 doxygen/img/hdfview-anthrstrm-sprdsht.png create mode 100644 doxygen/img/hdfview-anthrstrm.png create mode 100644 doxygen/img/hdfview-imgicon.png create mode 100644 doxygen/img/hdfview-imgprop.png create mode 100644 doxygen/img/hdfview-imgsubset.png create mode 100644 doxygen/img/hdfview-newcmpd.png create mode 100644 doxygen/img/hdfview-newimgsubset.png create mode 100644 doxygen/img/hdfview-prop.png create mode 100644 doxygen/img/hdfview-qf.png create mode 100644 doxygen/img/hdfview-regref.png create mode 100644 doxygen/img/hdfview-regref1.png create mode 100644 doxygen/img/hdfview-regref2.png create mode 100644 doxygen/img/hdfview-regrefval.png create mode 100644 doxygen/img/hdfview-table.png create mode 100644 doxygen/img/hdfview-tree.png create mode 100644 doxygen/img/imgLBDsetCreate.gif create 
mode 100644 doxygen/img/imgLBDsetSubRW11.png create mode 100644 doxygen/img/imgLBDsetSubRW12.png create mode 100644 doxygen/img/imgLBDsetSubRW31.png create mode 100644 doxygen/img/imgLBDsetSubRW32.png create mode 100644 doxygen/img/imgLBDsetSubRW33.png create mode 100644 doxygen/img/imgLBFile.gif create mode 100644 doxygen/img/imggrpcreate.gif create mode 100644 doxygen/img/imggrpdsets.gif create mode 100644 doxygen/img/imggrps.gif create mode 100644 doxygen/img/newgroupimage.png create mode 100644 doxygen/img/noattrs.png create mode 100644 doxygen/img/properties.png create mode 100644 doxygen/img/scarletletter.png create mode 100644 doxygen/img/showasimage.png create mode 100644 doxygen/img/storm.png create mode 100644 doxygen/img/tutr-lochk.png create mode 100644 doxygen/img/tutr-lochks.png create mode 100644 doxygen/img/tutr-locons.png create mode 100755 doxygen/img/vol_architecture.png delete mode 100644 java/src/hdf/hdf5lib/HDF5GroupInfo.java diff --git a/CMakeLists.txt b/CMakeLists.txt index 0a0e4ca..35b345c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,6 +12,11 @@ if (POLICY CMP0083) cmake_policy (SET CMP0083 NEW) endif () +# Avoid warning about DOWNLOAD_EXTRACT_TIMESTAMP in CMake 3.24: +if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") + cmake_policy(SET CMP0135 NEW) +endif() + #----------------------------------------------------------------------------- # Instructions for use : Normal Build # diff --git a/CTestConfig.cmake b/CTestConfig.cmake index 514a9d6..b780b86 100644 --- a/CTestConfig.cmake +++ b/CTestConfig.cmake @@ -12,7 +12,7 @@ ## This file should be placed in the root directory of your project. ## Then modify the CMakeLists.txt file in the root directory of your ## project to incorporate the testing dashboard. -## # The following are required to use Dart and the CDash dashboard. 
+## # The following are required to use Dart and the CDash dashboard ## ENABLE_TESTING() ## INCLUDE(CTest) set (CTEST_PROJECT_NAME "HDF5") diff --git a/config/cmake/UseJava.cmake b/config/cmake/UseJava.cmake index 2351ce8..1de08db 100644 --- a/config/cmake/UseJava.cmake +++ b/config/cmake/UseJava.cmake @@ -1448,6 +1448,7 @@ function(create_javadoc _target) add_custom_target(${_target}_javadoc ALL COMMAND ${Java_JAVADOC_EXECUTABLE} + -Xdoclint:none ${_javadoc_options} ${_javadoc_files} ${_javadoc_packages} diff --git a/configure.ac b/configure.ac index e2fbf04..ab177fc 100644 --- a/configure.ac +++ b/configure.ac @@ -1213,6 +1213,7 @@ if test "X$HDF5_DOXYGEN" = "Xyes"; then AC_SUBST([DOXYGEN_PACKAGE]) AC_SUBST([DOXYGEN_VERSION_STRING]) + AC_SUBST([DOXYGEN_DIR]) AC_SUBST([DOXYGEN_INCLUDE_ALIASES]) AC_SUBST([DOXYGEN_PROJECT_LOGO]) AC_SUBST([DOXYGEN_PROJECT_BRIEF]) @@ -1237,6 +1238,7 @@ if test "X$HDF5_DOXYGEN" = "Xyes"; then # SRCDIR Environment variables used inside doxygen macro for the source location: DOXYGEN_PACKAGE=${PACKAGE_NAME} DOXYGEN_VERSION_STRING=${PACKAGE_VERSION} + DOXYGEN_DIR='$(SRCDIR)/doxygen' DOXYGEN_INCLUDE_ALIASES='$(SRCDIR)/doxygen/aliases' DOXYGEN_PROJECT_LOGO='$(SRCDIR)/doxygen/img/HDFG-logo.png' DOXYGEN_PROJECT_BRIEF='' @@ -1249,14 +1251,14 @@ if test "X$HDF5_DOXYGEN" = "Xyes"; then DOXYGEN_HTML_HEADER='$(SRCDIR)/doxygen/hdf5_header.html' DOXYGEN_HTML_FOOTER='$(SRCDIR)/doxygen/hdf5_footer.html' DOXYGEN_HTML_EXTRA_STYLESHEET='$(SRCDIR)/doxygen/hdf5doxy.css' - DOXYGEN_HTML_EXTRA_FILES='$(SRCDIR)/doxygen/hdf5_navtree_hacks.js $(SRCDIR)/doxygen/img/FF-IH_FileGroup.gif $(SRCDIR)/doxygen/img/FF-IH_FileObject.gif $(SRCDIR)/doxygen/img/FileFormatSpecChunkDiagram.jpg $(SRCDIR)/doxygen/img/ftv2node.png $(SRCDIR)/doxygen/img/ftv2pnode.png $(SRCDIR)/doxygen/img/HDFG-logo.png $(SRCDIR)/doxygen/img/IOFlow2.gif $(SRCDIR)/doxygen/img/IOFlow3.gif $(SRCDIR)/doxygen/img/IOFlow.gif $(SRCDIR)/doxygen/img/PaletteExample1.gif 
$(SRCDIR)/doxygen/img/Palettes.fm.anc.gif' + DOXYGEN_HTML_EXTRA_FILES='$(SRCDIR)/doxygen/hdf5_navtree_hacks.js' DOXYGEN_TAG_FILE=hdf5.tag DOXYGEN_SERVER_BASED_SEARCH=NO DOXYGEN_EXTERNAL_SEARCH=NO DOXYGEN_SEARCHENGINE_URL= DOXYGEN_STRIP_FROM_PATH='$(SRCDIR)' DOXYGEN_STRIP_FROM_INC_PATH='$(SRCDIR)' - DOXYGEN_PREDEFINED='H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD' + DOXYGEN_PREDEFINED='H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD H5_DOXYGEN_FORTRAN' DX_INIT_DOXYGEN([HDF5], [./doxygen/Doxyfile], [hdf5lib_docs]) fi diff --git a/doxygen/CMakeLists.txt b/doxygen/CMakeLists.txt index c1a2071..86d34a3 100644 --- a/doxygen/CMakeLists.txt +++ b/doxygen/CMakeLists.txt @@ -7,11 +7,12 @@ project (HDF5_DOXYGEN C) if (DOXYGEN_FOUND) set (DOXYGEN_PACKAGE ${HDF5_PACKAGE_NAME}) set (DOXYGEN_VERSION_STRING ${HDF5_PACKAGE_VERSION_STRING}) + set (DOXYGEN_DIR ${HDF5_DOXYGEN_DIR}) set (DOXYGEN_INCLUDE_ALIASES_PATH ${HDF5_DOXYGEN_DIR}) set (DOXYGEN_INCLUDE_ALIASES aliases) set (DOXYGEN_VERBATIM_VARS DOXYGEN_INCLUDE_ALIASES) set (DOXYGEN_PROJECT_LOGO ${HDF5_DOXYGEN_DIR}/img/HDFG-logo.png) - set (DOXYGEN_PROJECT_BRIEF "C-API Reference") + set (DOXYGEN_PROJECT_BRIEF "API Reference") set (DOXYGEN_INPUT_DIRECTORY "${HDF5_SOURCE_DIR} ${HDF5_DOXYGEN_DIR}/dox ${HDF5_GENERATED_SOURCE_DIR}") set (DOXYGEN_OPTIMIZE_OUTPUT_FOR_C YES) set (DOXYGEN_MACRO_EXPANSION YES) @@ -28,7 +29,7 @@ if (DOXYGEN_FOUND) set (DOXYGEN_SEARCHENGINE_URL) set (DOXYGEN_STRIP_FROM_PATH ${HDF5_SOURCE_DIR}) set (DOXYGEN_STRIP_FROM_INC_PATH ${HDF5_SOURCE_DIR}) - set (DOXYGEN_PREDEFINED "H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD H5_HAVE_SUBFILING_VFD H5_HAVE_IOC_VFD") + set (DOXYGEN_PREDEFINED "H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD H5_DOXYGEN_FORTRAN H5_HAVE_SUBFILING_VFD H5_HAVE_IOC_VFD") # This configure and individual custom targets work together # Replace variables inside 
@@ with the current values diff --git a/doxygen/Doxyfile.in b/doxygen/Doxyfile.in index 7657fa5..08f5545 100644 --- a/doxygen/Doxyfile.in +++ b/doxygen/Doxyfile.in @@ -280,13 +280,13 @@ OPTIMIZE_OUTPUT_FOR_C = YES # qualified scopes will look different, etc. # The default value is: NO. -OPTIMIZE_OUTPUT_JAVA = NO +OPTIMIZE_OUTPUT_JAVA = YES # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. -OPTIMIZE_FOR_FORTRAN = NO +OPTIMIZE_FOR_FORTRAN = YES # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. @@ -875,7 +875,11 @@ FILE_PATTERNS = H5*public.h \ H5VLconnector.h \ H5VLconnector_passthru.h \ H5VLnative.h \ + H5Zdevelop.h \ H5version.h \ + H5*.java \ + HDF*.java \ + *.F90 \ *.dox # The RECURSIVE tag can be used to specify whether or not subdirectories should @@ -944,7 +948,7 @@ EXAMPLE_RECURSIVE = NO # that contain images that are to be included in the documentation (see the # \image command). -IMAGE_PATH = @HDF5_DOXYGEN_DIR@/img +IMAGE_PATH = @DOXYGEN_DIR@/img # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program diff --git a/doxygen/aliases b/doxygen/aliases index 6730be5..5ee60d5 100644 --- a/doxygen/aliases +++ b/doxygen/aliases @@ -379,4 +379,4 @@ ALIASES += obj_info_fields=" - - +
FlagPurpose
::= H5T_REFERENCE { } - ::= H5T_STD_REF_OBJECT | H5T_STD_REF_DSETREG | H5T_STD_REF | UNDEFINED + ::= H5T_STD_REF_OBJECT | H5T_STD_REF_DSETREG ::= H5T_COMPOUND { + diff --git a/doxygen/dox/FileFormatSpec.dox b/doxygen/dox/FileFormatSpec.dox new file mode 100644 index 0000000..fc10574 --- /dev/null +++ b/doxygen/dox/FileFormatSpec.dox @@ -0,0 +1,23 @@ +/** \page FMT3 HDF5 File Format Specification Version 3.0 + +\htmlinclude H5.format.html + +*/ + +/** \page FMT2 HDF5 File Format Specification Version 2.0 + +\htmlinclude H5.format.2.0.html + +*/ + +/** \page FMT11 HDF5 File Format Specification Version 1.1 + +\htmlinclude H5.format.1.1.html + +*/ + +/** \page FMT1 HDF5 File Format Specification Version 1.0 + +\htmlinclude H5.format.1.0.html + +*/ \ No newline at end of file diff --git a/doxygen/dox/GettingStarted.dox b/doxygen/dox/GettingStarted.dox index 880491d..29c5033 100644 --- a/doxygen/dox/GettingStarted.dox +++ b/doxygen/dox/GettingStarted.dox @@ -1,3 +1,100 @@ -/** \page GettingStarted \Code{Hello, HDF5!} +/** @page GettingStarted Getting Started with HDF5 - */ \ No newline at end of file +Navigate back: \ref index "Main" +
+ +\section sec_learn Learning HDF5 +There are several resources for learning about HDF5. The HDF Group provides an on-line HDF5 tutorial, +documentation, examples, and videos. There are also tutorials provided by other organizations that are very useful for learning about HDF5. + +\subsection subsec_learn_intro The HDF Group Resources +For a quick introduction to HDF5 see the following: + + + + + + + + + + + + + +
+@ref IntroHDF5 + +A very brief introduction to HDF5 and the HDF5 programming model and APIs +
+@ref LearnHDFView + +A tutorial for learning how to use HDFView. NO programming involved! +
+@ref LearnBasics + +Step by step instructions for learning HDF5 that include programming examples +
+ +\subsection subsec_learn_tutor The HDF Group Tutorials and Examples +These tutorials and examples are available for learning about the HDF5 High Level APIs, tools, +Parallel HDF5, and the HDF5-1.10 VDS and SWMR new features: + + + + + + + + + + + + + + + + + + + + + + + + + +
+Using the High Level APIs + +\ref H5LT \ref H5IM \ref H5TB \ref H5PT \ref H5DS +
+Introduction to Parallel HDF5 + +A brief introduction to Parallel HDF5. If you are new to HDF5 please see the @ref LearnBasics topic first. +
+\ref ViewTools + +\li @ref LearnHDFView +\li @ref ViewToolsCommand +\li @ref ViewToolsJPSS +
+HDF5-1.10 New Features + +\li Introduction to the Virtual Dataset - VDS +\li Introduction to Single-Writer/Multiple-Reader (SWMR) +
+Example Programs + +\ref HDF5Examples +
+Videos + +\li Introduction to HDF5 +\li Parallel HDF5 +
+ +
+Navigate back: \ref index "Main" + +*/ diff --git a/doxygen/dox/IntroHDF5.dox b/doxygen/dox/IntroHDF5.dox new file mode 100644 index 0000000..ec46217 --- /dev/null +++ b/doxygen/dox/IntroHDF5.dox @@ -0,0 +1,627 @@ +/** @page IntroHDF5 Introduction to HDF5 + +Navigate back: \ref index "Main" / \ref GettingStarted +
+ +\section sec_intro_desc HDF5 Description +HDF5 consists of a file format for storing HDF5 data, a data model for logically organizing and accessing +HDF5 data from an application, and the software (libraries, language interfaces, and tools) for working with this format. + +\subsection subsec_intro_desc_file File Format +HDF5 consists of a file format for storing HDF5 data, a data model for logically organizing and accessing HDF5 data from an application, +and the software (libraries, language interfaces, and tools) for working with this format. + +\subsection subsec_intro_desc_dm Data Model +The HDF5 Data Model, also known as the HDF5 Abstract (or Logical) Data Model consists of +the building blocks for data organization and specification in HDF5. + +An HDF5 file (an object in itself) can be thought of as a container (or group) that holds +a variety of heterogeneous data objects (or datasets). The datasets can be images, tables, +graphs, and even documents, such as PDF or Excel: + + + + + +
+\image html fileobj.png +
+ +The two primary objects in the HDF5 Data Model are groups and datasets. + +There are also a variety of other objects in the HDF5 Data Model that support groups and datasets, +including datatypes, dataspaces, properties and attributes. + +\subsubsection subsec_intro_desc_dm_group Groups +HDF5 groups (and links) organize data objects. Every HDF5 file contains a root group that can +contain other groups or be linked to objects in other files. + + + + + + +
There are two groups in the HDF5 file depicted above: Viz and SimOut. +Under the Viz group are a variety of images and a table that is shared with the SimOut group. +The SimOut group contains a 3-dimensional array, a 2-dimensional array and a link to a 2-dimensional +array in another HDF5 file.
+\image html group.png +
+ +Working with groups and group members is similar in many ways to working with directories and files +in UNIX. As with UNIX directories and files, objects in an HDF5 file are often described by giving +their full (or absolute) path names. +\li / signifies the root group. +\li /foo signifies a member of the root group called foo. +\li /foo/zoo signifies a member of the group foo, which in turn is a member of the root group. + +\subsubsection subsec_intro_desc_dm_dset Datasets +HDF5 datasets organize and contain the “raw” data values. A dataset consists of metadata +that describes the data, in addition to the data itself: + + + + + + +
In this picture, the data is stored as a three dimensional dataset of size 4 x 5 x 6 with an integer datatype. +It contains attributes, Time and Pressure, and the dataset is chunked and compressed.
+\image html dataset.png +
+ +Datatypes, dataspaces, properties and (optional) attributes are HDF5 objects that describe a dataset. +The datatype describes the individual data elements. + +\subsection subsec_intro_desc_props Datatypes, Dataspaces, Properties and Attributes + +\subsubsection subsec_intro_desc_prop_dtype Datatypes +The datatype describes the individual data elements in a dataset. It provides complete information for +data conversion to or from that datatype. + + + + + + +
In the dataset depicted, each element of the dataset is a 32-bit integer.
+\image html datatype.png +
+ +Datatypes in HDF5 can be grouped into: +
    +
  • +Pre-Defined Datatypes: These are datatypes that are created by HDF5. They are actually opened (and closed) +by HDF5 and can have different values from one HDF5 session to the next. There are two types of pre-defined datatypes: +
      +
    • +Standard datatypes are the same on all platforms and are what you see in an HDF5 file. Their names are of the form +H5T_ARCH_BASE where ARCH is an architecture name and BASE is a programming type name. For example, #H5T_IEEE_F32BE +indicates a standard Big Endian floating point type. +
    • +
    • +Native datatypes are used to simplify memory operations (reading, writing) and are NOT the same on different platforms. +For example, #H5T_NATIVE_INT indicates an int (C). +
    • +
    +
  • +
  • +Derived Datatypes: These are datatypes that are created or derived from the pre-defined datatypes. +An example of a commonly used derived datatype is a string of more than one character. Compound datatypes +are also derived types. A compound datatype can be used to create a simple table, and can also be nested, +in which it includes one or more other compound datatypes. + +
    This is an example of a dataset with a compound datatype. Each element in the dataset consists +of a 16-bit integer, a character, a 32-bit integer, and a 2x3x2 array of 32-bit floats (the datatype). +It is a 2-dimensional 5 x 3 array (the dataspace). The datatype should not be confused with the dataspace. +
    +\image html cmpnddtype.png +
    +
  • +
+ +\subsubsection subsec_intro_desc_prop_dspace Dataspaces +A dataspace describes the layout of a dataset’s data elements. It can consist of no elements (NULL), +a single element (scalar), or a simple array. + + + + + + +
This image illustrates a dataspace that is an array with dimensions of 5 x 3 and a rank (number of dimensions) of 2.
+\image html dataspace1.png +
+ +A dataspace can have dimensions that are fixed (unchanging) or unlimited, which means they can grow +in size (i.e. they are extendible). + +There are two roles of a dataspace: +\li It contains the spatial information (logical layout) of a dataset stored in a file. This includes the rank and dimensions of a dataset, which are a permanent part of the dataset definition. +\li It describes an application’s data buffers and data elements participating in I/O. In other words, it can be used to select a portion or subset of a dataset. + + + + + + +
The dataspace is used to describe both the logical layout of a dataset and a subset of a dataset.
+\image html dataspace.png +
+ +\subsubsection subsec_intro_desc_prop_property Properties +A property is a characteristic or feature of an HDF5 object. There are default properties which +handle the most common needs. These default properties can be modified using the HDF5 Property +List API to take advantage of more powerful or unusual features of HDF5 objects. + + + + + +
+\image html properties.png +
+ +For example, the data storage layout property of a dataset is contiguous by default. For better +performance, the layout can be modified to be chunked or chunked and compressed: + +\subsubsection subsec_intro_desc_prop_attr Attributes +Attributes can optionally be associated with HDF5 objects. They have two parts: a name and a value. +Attributes are accessed by opening the object that they are attached to so are not independent objects. +Typically an attribute is small in size and contains user metadata about the object that it is attached to. + +Attributes look similar to HDF5 datasets in that they have a datatype and dataspace. However, they +do not support partial I/O operations, and they cannot be compressed or extended. + +\subsection subsec_intro_desc_soft HDF5 Software +The HDF5 software is written in C and includes optional wrappers for C++, FORTRAN (90 and F2003), +and Java. The HDF5 binary distribution consists of the HDF5 libraries, include files, command-line +utilities, scripts for compiling applications, and example programs. + +\subsubsection subsec_intro_desc_soft_apis HDF5 APIs and Libraries +There are APIs for each type of object in HDF5. For example, all C routines in the HDF5 library +begin with a prefix of the form H5*, where * is one or two uppercase letters indicating the type +of object on which the function operates: +\li @ref H5A Attribute Interface +\li @ref H5D Dataset Interface +\li @ref H5F File Interface + +The HDF5 High Level APIs simplify many of the steps required to create and access objects, as well +as providing templates for storing objects. 
Following is a list of the High Level APIs: +\li @ref H5LT – simplifies steps in creating datasets and attributes +\li @ref H5IM – defines a standard for storing images in HDF5 +\li @ref H5TB – condenses the steps required to create tables +\li @ref H5DS – provides a standard for dimension scale storage +\li @ref H5PT – provides a standard for storing packet data + +\subsubsection subsec_intro_desc_soft_tools Tools +Useful tools for working with HDF5 files include: +\li h5dump: A utility to dump or display the contents of an HDF5 File +\li h5cc, h5c++, h5fc: Unix scripts for compiling applications +\li HDFView: A java browser to view HDF (HDF4 and HDF5) files + +

h5dump

+The h5dump utility displays the contents of an HDF5 file in Data Description Language (\ref DDLBNF110). +Below is an example of h5dump output for an HDF5 file that contains no objects: +\code +$ h5dump file.h5 + HDF5 "file.h5" { + GROUP "/" { + } + } +\endcode + +With large files and datasets the output from h5dump can be overwhelming. +There are options that can be used to examine specific parts of an HDF5 file. +Some useful h5dump options are included below: +\code + -H, --header Display header information only (no data) + -d Display a dataset with a specified path and name + -p Display properties + -n Display the contents of the file +\endcode + +

h5cc, h5fc, h5c++

+The built HDF5 binaries include the h5cc, h5fc, h5c++ compile scripts for compiling applications. +When using these scripts there is no need to specify the HDF5 libraries and include files. +Compiler options can be passed to the scripts. + +

HDFView

+The HDFView tool allows browsing of data in HDF (HDF4 and HDF5) files. + +\section sec_intro_pm Introduction to the HDF5 Programming Model and APIs +The HDF5 Application Programming Interface is extensive, but a few functions do most of the work. + +To introduce the programming model, examples in Python and C are included below. The Python examples +use the HDF5 Python APIs (h5py). See the Examples from "Learning the Basics" page for complete examples +that can be downloaded and run for C, FORTRAN, C++, Java and Python. + +The general paradigm for working with objects in HDF5 is to: +\li Open the object. +\li Access the object. +\li Close the object. + +The library imposes an order on the operations by argument dependencies. For example, a file must be +opened before a dataset because the dataset open call requires a file handle as an argument. Objects +can be closed in any order. However, once an object is closed it no longer can be accessed. + +Keep the following in mind when looking at the example programs included in this section: +
    +
  • +
      +
    • +C routines begin with the prefix “H5*” where * is a single letter indicating the object on which the +operation is to be performed. +
    • +
    • +FORTRAN routines are similar; they begin with “h5*” and end with “_f”. +
    • +
    • +Java routines are similar; the routine names begin with “H5*” and are prefixed with “H5.” as the class. Constants are +in the HDF5Constants class and are prefixed with "HDF5Constants.". The function arguments +are usually similar, @see @ref HDF5LIB +
    • +
    +For example: +
      +
    • +File Interface:
      • #H5Fopen (C)
      • h5fopen_f (FORTRAN)
      • H5.H5Fopen (Java)
      +
    • +
    • +Dataset Interface:
      • #H5Dopen (C)
      • h5dopen_f (FORTRAN)
      • H5.H5Dopen (Java)
      +
    • +
    • +Dataspace interface:
      • #H5Sclose (C)
      • h5sclose_f (FORTRAN)
      • H5.H5Sclose (Java)
      +
    • +
    +The HDF5 Python APIs use methods associated with specific objects. +
  • +
  • +For portability, the HDF5 library has its own defined types. Some common types that you will see +in the example code are: +
      +
    • +#hid_t is used for object handles +
    • +
    • +hsize_t is used for dimensions +
    • +
    • +#herr_t is used for many return values +
    • +
    +
  • +
  • +Language specific files must be included in applications: +
      +
    • +Python: Add "import h5py / import numpy" +
    • +
    • +C: Add "#include hdf5.h" +
    • +
    • +FORTRAN: Add "USE HDF5" and call h5open_f and h5close_f to initialize and close the HDF5 FORTRAN interface +
    • +
    • +Java: Add "import hdf.hdf5lib.H5; + import hdf.hdf5lib.HDF5Constants;" +
    • +
    +
  • +
+ +\subsection subsec_intro_pm_file Steps to create a file +To create an HDF5 file you must: +\li Specify property lists (or use the defaults). +\li Create the file. +\li Close the file (and property lists if needed). + +Example: + + + + + +
The following Python and C examples create a file, file.h5, and then close it. +The resulting HDF5 file will only contain a root group:
+\image html crtf-pic.png +
+ +Calling h5py.File with ‘w’ for the file access flag will create a new HDF5 file and overwrite +an existing file with the same name. “file” is the file handle returned from opening the file. +When finished with the file, it must be closed. When not specifying property lists, the default +property lists are used: + + + + + +
+Python +\code + import h5py + file = h5py.File ('file.h5', 'w') + file.close () +\endcode +
+ +The H5Fcreate function creates an HDF5 file. #H5F_ACC_TRUNC is the file access flag to create a new +file and overwrite an existing file with the same name, and #H5P_DEFAULT is the value specified to +use a default property list. + + + + + +
+C +\code + #include "hdf5.h" + + int main() { + hid_t file_id; + herr_t status; + + file_id = H5Fcreate ("file.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + status = H5Fclose (file_id); + } +\endcode +
+ +\subsection subsec_intro_pm_dataset Steps to create a dataset +As described previously, an HDF5 dataset consists of the raw data, as well as the metadata that +describes the data (datatype, spatial information, and properties). To create a dataset you must: +\li Define the dataset characteristics (datatype, dataspace, properties). +\li Decide which group to attach the dataset to. +\li Create the dataset. +\li Close the dataset handle from step 3. + +Example: + + + + + +
The code excerpts below show the calls that need to be made to create a 4 x 6 integer dataset dset +in a file dset.h5. The dataset will be located in the root group:
+\image html crtdset.png +
 + +With Python, the creation of the dataspace is included as a parameter in the dataset creation method. +Just one call will create a 4 x 6 integer dataset dset. A pre-defined Big Endian 32-bit integer datatype +is specified. The create_dataset method creates the dataset in the root group (the file object). +The dataset is closed by the Python interface. + +
+Python +\code + dataset = file.create_dataset("dset",(4, 6), h5py.h5t.STD_I32BE) +\endcode +
+ +To create the same dataset in C, you must specify the dataspace with the #H5Screate_simple function, +create the dataset by calling #H5Dcreate, and then close the dataspace and dataset with calls to #H5Dclose +and #H5Sclose. #H5P_DEFAULT is specified to use a default property list. Note that the file identifier +(file_id) is passed in as the first parameter to #H5Dcreate, which creates the dataset in the root group. + + + + + +
+C +\code + // Create the dataspace for the dataset. + dims[0] = 4; + dims[1] = 6; + + dataspace_id = H5Screate_simple(2, dims, NULL); + + // Create the dataset. + dataset_id = H5Dcreate (file_id, "/dset", H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + + // Close the dataset and dataspace + status = H5Dclose(dataset_id); + status = H5Sclose(dataspace_id); +\endcode +
+ +\subsection subsec_intro_pm_write Writing to or reading from a dataset +Once you have created or opened a dataset you can write to it: + + + + + +
+Python +\code + data = np.zeros((4,6)) + for i in range(4): + for j in range(6): + data[i][j]= i*6+j+1 + + dataset[...] = data <-- Write data to dataset + data_read = dataset[...] <-- Read data from dataset +\endcode +
+ +#H5S_ALL is passed in for the memory and file dataspace parameters to indicate that the entire dataspace +of the dataset is specified. These two parameters can be modified to allow subsetting of a dataset. +The native predefined datatype, #H5T_NATIVE_INT, is used for reading and writing so that HDF5 will do +any necessary integer conversions: + + + + + +
+C +\code + status = H5Dwrite (dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset_data); + status = H5Dread (dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset_data); +\endcode +
+ +\subsection subsec_intro_pm_group Steps to create a group +An HDF5 group is a structure containing zero or more HDF5 objects. Before you can create a group you must +obtain the location identifier of where the group is to be created. Following are the steps that are required: +\li Decide where to put the group – in the “root group” (or file identifier) or in another group. Open the group if it is not already open. +\li Define properties or use the default. +\li Create the group. +\li Close the group. + + + + + + +
 Creates the group MyGroup in the root group of the file dset.h5
+\image html crtgrp.png +
+ +The code below opens the dataset dset.h5 with read/write permission and creates a group MyGroup in the root group. +Properties are not specified so the defaults are used: + + + + + +
+Python +\code + import h5py + file = h5py.File('dset.h5', 'r+') + group = file.create_group ('MyGroup') + file.close() +\endcode +
+ +To create the group MyGroup in the root group, you must call #H5Gcreate, passing in the file identifier returned +from opening or creating the file. The default property lists are specified with #H5P_DEFAULT. The group is then +closed: + + + + + +
+C +\code + group_id = H5Gcreate (file_id, "MyGroup", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + status = H5Gclose (group_id); +\endcode +
+ +\subsection subsec_intro_pm_attr Steps to create and write to an attribute +To create an attribute you must open the object that you wish to attach the attribute to. Then you can create, +access, and close the attribute as needed: +\li Open the object that you wish to add an attribute to. +\li Create the attribute +\li Write to the attribute +\li Close the attribute and the object it is attached to. + + + + + + +
Creates attributes that are attached to the dataset dset
+\image html crtatt.png +
+ +The dataspace, datatype, and data are specified in the call to create an attribute in Python: + + + + + +
+Python +\code + dataset.attrs["Units"] = "Meters per second" <-- Create string + attr_data = np.zeros((2,)) + attr_data[0] = 100 + attr_data[1] = 200 + dataset.attrs.create("Speed", attr_data, (2,), "i") <-- Create Integer +\endcode +
+ +To create an integer attribute in C, you must create the dataspace, create the attribute, write +to it and then close it in separate steps: + + + + + +
+C +\code + hid_t attribute_id, dataspace_id; // identifiers + hsize_t dims; + int attr_data[2]; + herr_t status; + + ... + + // Initialize the attribute data. + attr_data[0] = 100; + attr_data[1] = 200; + + // Create the data space for the attribute. + dims = 2; + dataspace_id = H5Screate_simple(1, &dims, NULL); + + // Create a dataset attribute. + attribute_id = H5Acreate2 (dataset_id, "Units", H5T_STD_I32BE, + dataspace_id, H5P_DEFAULT, H5P_DEFAULT); + + // Write the attribute data. + status = H5Awrite(attribute_id, H5T_NATIVE_INT, attr_data); + + // Close the attribute. + status = H5Aclose(attribute_id); + + // Close the dataspace. + status = H5Sclose(dataspace_id); +\endcode +
+ +
+Navigate back: \ref index "Main" / \ref GettingStarted + + +@page HDF5Examples HDF5 Examples +Example programs of how to use HDF5 are provided below. +For HDF-EOS specific examples, see the examples +of how to access and visualize NASA HDF-EOS files using IDL, MATLAB, and NCL on the +HDF-EOS Tools and Information Center page. + +\section secHDF5Examples Examples +\li \ref LBExamples +\li Examples by API +\li Examples in the Source Code +\li Other Examples + +\section secHDF5ExamplesCompile How To Compile +For information on compiling in C, C++ and Fortran, see: \ref LBCompiling + +\section secHDF5ExamplesOther Other Examples +IDL, MATLAB, and NCL Examples for HDF-EOS +Examples of how to access and visualize NASA HDF-EOS files using IDL, MATLAB, and NCL. + +Miscellaneous Examples +These (very old) examples resulted from working with users, and are not fully tested. Most of them are in C, with a few in Fortran and Java. + +Using Special Values +These examples show how to create special values in an HDF5 application. + +*/ diff --git a/doxygen/dox/LearnBasics.dox b/doxygen/dox/LearnBasics.dox new file mode 100644 index 0000000..298672d --- /dev/null +++ b/doxygen/dox/LearnBasics.dox @@ -0,0 +1,183 @@ +/** @page LearnBasics Learning the Basics + +Navigate back: \ref index "Main" / \ref GettingStarted +
+ +\section secIntro Introduction +The following topics cover the basic features in HDF5. The topics build on each other and are +intended to be completed in order. Some sections use files created in earlier sections. The +examples used can also be found on the \ref LBExamples +page and in the HDF5 source code (C, C++, Fortran). + +\section Topics Topics +\li @subpage LBFileOrg +\li @subpage LBAPI +\li @subpage LBProg +\li @subpage LBFileCreate +\li @subpage LBDsetCreate +\li @subpage LBDsetRW +\li @subpage LBAttrCreate +\li @subpage LBGrpCreate +\li @subpage LBGrpCreateNames +\li @subpage LBGrpDset +\li @subpage LBDsetSubRW +\li @subpage LBDatatypes +\li @subpage LBPropsList +\li @subpage LBDsetLayout +\li @subpage LBExtDset +\li @subpage LBComDset +\li @subpage LBContents +\li @subpage LBQuiz +\li @subpage LBQuizAnswers +\li @subpage LBCompiling +\li @subpage LBTraining + +
+Navigate back: \ref index "Main" / \ref GettingStarted + + +@page LBExamples Examples from Learning the Basics + +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBExamples +These examples are used in the \ref LearnBasics topic. See \ref LBCompiling for details on compiling them. +PLEASE NOTE that the example programs are listed in the order they are expected to be run. Some example +programs use files created in earlier examples. + +\section secLBExamplesSrc HDF5 Source Code Examples +These examples (C, C++, Fortran) are provided in the HDF5 source code and (Unix) binaries. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Feature +Examples +Comments +
Create a file +C Fortran C++ Java Python + +
Create a dataset +C Fortran C++ Java Python + +
Read and write to a dataset +C Fortran C++ Java Python + +
Create an attribute +C Fortran C++ Java Python + +
Create a group +C Fortran C++ Java Python + +
Create groups in a file using absolute and relative paths +C Fortran C++ Java Python + +
Create datasets in a group +C Fortran C++ Java Python + +
Create a file and dataset and select/read a subset from the dataset +C Fortran C++ Java Python +Also see examples to Write by row (and column) below. +
Create an extendible (unlimited dimension) dataset +C Fortran C++ Java Python +Also see examples to Extend by row (and column) below +
Create a chunked and compressed dataset +C Fortran C++ Java Python + +
+ +*See HDF5Mathematica for user-contributed +HDF5 Mathematica Wrappers and Introductory Tutorial Examples. The examples use P/Invoke. + +\section secLBExamplesAddl Additional Examples +These examples make minor changes to the tutorial examples. + + + + + + + + + + + + + + + + + + + + + +
Feature +Examples +
Write by row +C Fortran +
Write by column +C Fortran +
Extend by row +C Fortran +
Extend by column +C Fortran +
+ + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +*/ diff --git a/doxygen/dox/LearnBasics1.dox b/doxygen/dox/LearnBasics1.dox new file mode 100644 index 0000000..a9b6d0e --- /dev/null +++ b/doxygen/dox/LearnBasics1.dox @@ -0,0 +1,1023 @@ +/** @page LBFileOrg HDF5 File Organization + +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBFileOrg HDF5 file +An HDF5 file is a container for storing a variety of scientific data and is composed of two primary types of objects: groups and datasets. + +\li HDF5 group: a grouping structure containing zero or more HDF5 objects, together with supporting metadata +\li HDF5 dataset: a multidimensional array of data elements, together with supporting metadata + +Any HDF5 group or dataset may have an associated attribute list. An HDF5 attribute is a user-defined HDF5 structure +that provides extra information about an HDF5 object. + +Working with groups and datasets is similar in many ways to working with directories and files in UNIX. As with UNIX +directories and files, an HDF5 object in an HDF5 file is often referred to by its full path name (also called an absolute path name). + +\li / signifies the root group. + +\li /foo signifies a member of the root group called foo. + +\li /foo/zoo signifies a member of the group foo, which in turn is a member of the root group. + +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBAPI The HDF5 API + +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBAPI HDF5 C API +The HDF5 library provides several interfaces, or APIs. These APIs provide routines for creating, +accessing, and manipulating HDF5 files and objects. + +The library itself is implemented in C. To facilitate the work of FORTRAN 90, C++ and Java programmers, +HDF5 function wrappers have been developed in each of these languages. This tutorial discusses the use +of the C functions and the FORTRAN wrappers. + +All C routines in the HDF5 library begin with a prefix of the form H5*, where * is one or two uppercase +letters indicating the type of object on which the function operates. +The FORTRAN wrappers come in the form of subroutines that begin with h5 and end with _f. +Java routine names begin with “H5*” and are prefixed with “H5.” as the class. Constants are +in the HDF5Constants class and are prefixed with "HDF5Constants.". +The APIs are listed below: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
API +DESCRIPTION +
H5 +Library Functions: general-purpose H5 functions +
H5A +Annotation Interface: attribute access and manipulation routines +
H5D +Dataset Interface: dataset access and manipulation routines +
H5E +Error Interface: error handling routines +
H5F +File Interface: file access routines +
H5G +Group Interface: group creation and operation routines +
H5I +Identifier Interface: identifier routines +
H5L +Link Interface: link routines +
H5O +Object Interface: object routines +
H5P +Property List Interface: object property list manipulation routines +
H5R +Reference Interface: reference routines +
H5S +Dataspace Interface: dataspace definition and access routines +
H5T +Datatype Interface: datatype creation and manipulation routines +
H5Z +Compression Interface: compression routine(s) +
+ +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBProg Programming Issues + +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +Keep the following in mind when looking at the example programs included in this tutorial: + +\section LBProgAPI APIs vary with different languages +\li C routines begin with the prefix “H5*” where * is a single letter indicating the object on which the operation is to be performed: + + + + + + + + +
File Interface: #H5Fopen
Dataset Interface:#H5Dopen
+ +\li FORTRAN routines begin with “h5*” and end with “_f”: + + + + + + + + +
File Interface: h5fopen_f
Dataset Interface:h5dopen_f
+ +\li Java routine names begin with “H5*” and are prefixed with “H5.” as the class. Constants are +in the HDF5Constants class and are prefixed with "HDF5Constants.".: + + + + + + + + +
File Interface: H5.H5Fopen
Dataset Interface:H5.H5Dopen
+ +\li APIs for languages like C++, Java, and Python use methods associated with specific objects. + +\section LBProgTypes HDF5 library has its own defined types +\li #hid_t is used for object handles +\li hsize_t is used for dimensions +\li #herr_t is used for many return values + +\section LBProgLang Language specific files must be included in applications +
    +
  • +Python: Add "import h5py / import numpy" +
  • +
  • +C: Add "#include hdf5.h" +
  • +
  • +FORTRAN: Add "USE HDF5" and call h5open_f and h5close_f to initialize and close the HDF5 FORTRAN interface +
  • +
  • +Java: Add "import hdf.hdf5lib.H5; + import hdf.hdf5lib.HDF5Constants;" +
  • +
+ +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBFileCreate Creating an HDF5 File + +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +An HDF5 file is a binary file containing scientific data and supporting metadata. +\section secLBFileCreate HDF5 File Access +To create an HDF5 file, an application must specify not only a file name, but a file access mode, +a file creation property list, and a file access property list. These terms are described below: +
    +
  • File access mode:
    +When creating a file, the file access mode specifies the action to take if the file already exists: +
      +
    • #H5F_ACC_TRUNC specifies that if the file already exists, the current contents will be deleted so +that the application can rewrite the file with new data. +
    • +
    • #H5F_ACC_EXCL specifies that the open will fail if the file already exists. If the file does not +already exist, the file access parameter is ignored. +
    • +
    +In either case, the application has both read and write access to the successfully created file. +
    +Note that there are two different access modes for opening existing files: +
      +
    • #H5F_ACC_RDONLY specifies that the application has read access but will not be allowed to write any data. +
    • +
    • #H5F_ACC_RDWR specifies that the application has read and write access. +
    • +
    +
  • +
  • File creation property list:
    The file creation property list is used to +control the file metadata. File metadata contains information about the size of the user-block*, +the size of various file data structures used by the HDF5 library, etc. In this tutorial, the +default file creation property list, #H5P_DEFAULT, is used.
    + *The user-block is a fixed-length block of data located at the beginning of the file which is +ignored by the HDF5 library. The user-block may be used to store any data or information found +to be useful to applications. +
  • +
  • File access property list:
    The file access property list is used to +control different methods of performing I/O on files. It also can be used to control how a file +is closed (whether or not to delay the actual file close until all objects in a file are closed). +The default file access property list, #H5P_DEFAULT, is used in this tutorial. +
  • +
+ +Please refer to the \ref sec_file section of the \ref UG and \ref H5F section in the \ref RM for +detailed information regarding file access/creation property lists and access modes. + +The steps to create and close an HDF5 file are as follows: +
    +
  1. Specify the file creation and access property lists, if necessary.
  2. +
  3. Create the file.
  4. +
  5. Close the file, and if necessary, close the property lists.
  6. +
+ +\section secLBFileExample Programming Example + +\subsection subsecLBFileExampleDesc Description +The following example code demonstrates how to create and close an HDF5 file. + +C +\code +#include "hdf5.h" + #define FILE "file.h5" + + int main() { + + hid_t file_id; /* file identifier */ + herr_t status; + + /* Create a new file using default properties. */ + file_id = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + + /* Terminate access to the file. */ + status = H5Fclose(file_id); + } +\endcode + +Fortran +\code + PROGRAM FILEEXAMPLE + + USE HDF5 ! This module contains all necessary modules + + IMPLICIT NONE + + CHARACTER(LEN=8), PARAMETER :: filename = "filef.h5" ! File name + INTEGER(HID_T) :: file_id ! File identifier + + INTEGER :: error ! Error flag + +! +! Initialize FORTRAN interface. +! + CALL h5open_f (error) + ! + ! Create a new file using default properties. + ! + CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) + + ! + ! Terminate access to the file. + ! + CALL h5fclose_f(file_id, error) +! +! Close FORTRAN interface. +! + CALL h5close_f(error) + END PROGRAM FILEEXAMPLE +\endcode + +See \ref LBExamples for the examples used in the Learning the Basics tutorial. + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection subsecLBFileExampleRem Remarks +\li In C: The include file hdf5.h contains definitions and declarations and must be included +in any program that uses the HDF5 library. +
+In FORTRAN: The module HDF5 contains definitions and declarations and must be used in any +program that uses the HDF5 library. Also note that #H5open MUST be called at the beginning of an HDF5 Fortran +application (prior to any HDF5 calls) to initialize the library and variables. The #H5close call MUST be at +the end of the HDF5 Fortran application. +\li #H5Fcreate creates an HDF5 file and returns the file identifier.
+For Fortran, the file creation property list and file access property list are optional. They can be omitted if the +default values are to be used.
+The root group is automatically created when a file is created. Every file has a root group and the path name of +the root group is always /. +\li #H5Fclose terminates access to an HDF5 file.
+When an HDF5 file is no longer accessed by a program, #H5Fclose must be called to release the resources used by the file. +This call is mandatory.
+Note that if #H5Fclose is called for a file, but one or more objects within the file remain open, those objects will +remain accessible until they are individually closed. This can cause access problems for other users, if objects were +inadvertently left open. A File Access property controls how the file is closed. + +\subsection subsecLBFileExampleCont File Contents +The HDF Group has developed tools for examining the contents of HDF5 files. The tool used throughout the HDF5 tutorial +is the HDF5 dumper, h5dump, which displays the file contents in human-readable form. The output of h5dump is an ASCII +display formatted according to the HDF5 DDL grammar. This grammar is defined, using Backus-Naur Form, in the +\ref DDLBNF110. + +To view the HDF5 file contents, simply type: +\code +h5dump +\endcode + + + + + + +
Describe the file contents of file.h5 using a directed graph.
+\image html imgLBFile.gif +
+ +The text description of file.h5, as generated by h5dump. The HDF5 file called file.h5 +contains a group called /, or the root group. (The file called filef.h5, created by the FORTRAN version of the example, +has the same output except that the filename shown is filef.h5.) +\code +HDF5 "file.h5" { + GROUP "/" { + } + } +\endcode + +\subsection subsecLBFileExampleDDL File Definition in DDL +The simplified DDL file definition for creating an HDF5 file. For simplicity, a simplified DDL is used in this tutorial. A +complete and more rigorous DDL can be found in the \ref DDLBNF110. + +The following symbol definitions are used in the DDL: +\code + ::= defined as + a token with the name tname + | one of or + * zero or more occurrences of +\endcode + +The simplified DDL for file definition is as follows: +\code + ::= HDF5 "" { } + + ::= GROUP "/" { * + * } + + ::= + + ::= | +\endcode + +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBDsetCreate Creating a Dataset +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +A dataset is a multidimensional array of data elements, together with supporting metadata. To create +a dataset, the application program must specify the location at which to create the dataset, the +dataset name, the datatype and dataspace of the data array, and the property lists. + +\section secLBDsetCreateDtype Datatypes +A datatype is a collection of properties, all of which can be stored on disk, and which, when taken as +a whole, provide complete information for data conversion to or from that datatype. + +There are two categories of datatypes in HDF5: +
    +
  • Pre-defined: These datatypes are opened and closed by HDF5.
    +Pre-defined datatypes can be atomic or composite: +
    • Atomic datatypes cannot be decomposed into smaller datatype units at the API level. For example: integer, float, reference, string.
    • +
    • Composite datatypes are aggregations of one or more datatypes. For example: array, variable length, enumeration, compound.
    +
  • +
  • Derived: These datatypes are created or derived from the pre-defined types.
    +A simple example of creating a derived datatype is using the string datatype, H5T_C_S1, to create strings of more than one character:
    +\code + hid_t strtype; // Datatype ID + herr_t status; + + strtype = H5Tcopy (H5T_C_S1); + status = H5Tset_size (strtype, 5); // create string of length 5 +\endcode +
  • +
+ +Shown below is the HDF5 pre-defined datatypes. +\code + +-- integer + +-- floating point + +---- atomic ----+-- date and time + | +-- character string + HDF5 datatypes --| +-- bitfield + | +-- opaque + | + +---- compound +\endcode + +Some of the HDF5 predefined atomic datatypes are listed below. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Examples of HDF5 predefined datatypes
DatatypeDescription
H5T_STD_I32LEFour-byte, little-endian, signed, two's complement integer
H5T_STD_U16BETwo-byte, big-endian, unsigned integer
H5T_IEEE_F32BEFour-byte, big-endian, IEEE floating point
H5T_IEEE_F64LEEight-byte, little-endian, IEEE floating point
H5T_C_S1One-byte, null-terminated string of eight-bit characters
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Examples of HDF5 predefined native datatypes
Native DatatypeCorresponding C or FORTRAN Type
C
H5T_NATIVE_INTint
H5T_NATIVE_FLOATfloat
H5T_NATIVE_CHARchar
H5T_NATIVE_DOUBLEdouble
H5T_NATIVE_LDOUBLElong double
Fortran
H5T_NATIVE_INTEGERinteger
H5T_NATIVE_REALreal
H5T_NATIVE_DOUBLEdouble precision
H5T_NATIVE_CHARACTERcharacter
+ +In this tutorial, we consider only HDF5 predefined integers. + +For further information on datatypes, see \ref sec_datatype in the \ref UG, in addition to the \ref LBDatatypes tutorial topic. + +\section secLBDsetCreateDspace Datasets and Dataspaces +A dataspace describes the dimensionality of the data array. A dataspace is either a regular N-dimensional +array of data points, called a simple dataspace, or a more general collection of data points organized +in another manner, called a complex dataspace. In this tutorial, we only consider simple dataspaces. + +HDF5 dataspaces +\code + +-- simple + HDF5 dataspaces --| + +-- complex +\endcode +The dimensions of a dataset can be fixed (unchanging), or they may be unlimited, which means that they are +extensible. A dataspace can also describe a portion of a dataset, making it possible to do partial +I/O operations on selections. + +\section secLBDsetCreateProp Property Lists +Property lists are a mechanism for modifying the default behavior when creating or accessing objects. For +more information on property lists see the \ref LBPropsList tutorial topic. + +The following property lists can be specified when creating a dataset: +\li Dataset Creation Property List
+When creating a dataset, HDF5 allows the user to specify how raw data is organized and/or compressed on +disk. This information is stored in a dataset creation property list and passed to the dataset interface. +The raw data on disk can be stored contiguously (in the same linear way that it is organized in memory), +partitioned into chunks, stored externally, etc. In this tutorial, we use the default dataset creation +property list (contiguous storage layout and no compression). For more information about dataset creation +property lists, see \ref sec_dataset in the \ref UG. +\li Link Creation Property List
+The link creation property list governs creation of the link(s) by which a new dataset is accessed and the +creation of any intermediate groups that may be missing. +\li Dataset Access Property List
+Dataset access property lists are properties that can be specified when accessing a dataset. + +\section secLBDsetCreateSteps Steps to Create a Dataset +To create an empty dataset (no data written) the following steps need to be taken: +
    +
  1. Obtain the location identifier where the dataset is to be created.
  2. +
  3. Define or specify the dataset characteristics: +
      +
    1. Define a datatype or specify a pre-defined datatype.
    2. +
    3. Define a dataspace.
    4. +
    5. Specify the property list(s) or use the default.
    6. +
  4. +
  5. Create the dataset.
  6. +
  7. Close the datatype, the dataspace, and the property list(s) if necessary.
  8. +
  9. Close the dataset.
  10. +
+In HDF5, datatypes and dataspaces are independent objects which are created separately from any dataset +that they might be attached to. Because of this, the creation of a dataset requires the definition of +the datatype and dataspace. In this tutorial, we use the HDF5 predefined datatypes (integer) and consider +only simple dataspaces. Hence, only the creation of dataspace objects is needed. + +\section secLBDsetCreateHL High Level APIs +The High Level \ref H5LT include functions that simplify and condense the steps for +creating datasets in HDF5. The examples in the following section use the standard APIs. For a +quick start you may prefer to look at the \ref H5LT at this time. + +If you plan to work with images, please look at the High Level \ref H5IM, as well. + +\section secLBDsetCreateProg Programming Example + +\subsection subsecLBDsetCreateProgDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example shows how to create an empty dataset. It creates a file called dset.h5 +in the C version (dsetf.h5 in Fortran), defines the dataset dataspace, creates a +dataset which is a 4x6 integer array, and then closes the dataspace, the dataset, and the file. + +For details on compiling an HDF5 application: [ \ref LBCompiling ] + +\subsection subsecLBDsetCreateProgRem Remarks +#H5Screate_simple creates a new simple dataspace and returns a dataspace identifier. +#H5Sclose releases and terminates access to a dataspace. + +C +\code + dataspace_id = H5Screate_simple (rank, dims, maxdims); + status = H5Sclose (dataspace_id ); +\endcode + +FORTRAN +\code + CALL h5screate_simple_f (rank, dims, dataspace_id, hdferr, maxdims=max_dims) + or + CALL h5screate_simple_f (rank, dims, dataspace_id, hdferr) + + CALL h5sclose_f (dataspace_id, hdferr) +\endcode + +#H5Dcreate creates an empty dataset at the specified location and returns a dataset identifier. +#H5Dclose closes the dataset and releases the resource used by the dataset. 
This call is mandatory. + +C +\code + dataset_id = H5Dcreate(file_id, "/dset", H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + status = H5Dclose (dataset_id); +\endcode + +FORTRAN +\code + CALL h5dcreate_f (loc_id, name, type_id, dataspace_id, dset_id, hdferr) + CALL h5dclose_f (dset_id, hdferr) +\endcode + +Note that if using the pre-defined datatypes in FORTRAN, then a call must be made to initialize and terminate access to the pre-defined datatypes: +\code + CALL h5open_f (hdferr) + CALL h5close_f (hdferr) +\endcode + +H5open must be called before any HDF5 library subroutine calls are made; +H5close must be called after the final HDF5 library subroutine call. + +See the programming example for an illustration of the use of these calls. + +\subsection subsecLBDsetCreateContent File Contents +The contents of the file dset.h5 (dsetf.h5 for FORTRAN) are shown below: + + + + + +
Contents of dset.h5 ( dsetf.h5)
+\image html imgLBDsetCreate.gif +
+ + + + + + + + +
dset.h5 in DDLdsetf.h5 in DDL
+\code +HDF5 "dset.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 4, 6 ) / ( 4, 6 ) } + DATA { + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + } + } +} +} +\endcode + +\code +HDF5 "dsetf.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 6, 4 ) / ( 6, 4 ) } + DATA { + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0 + } + } +} +} +\endcode +
+Note in above that #H5T_STD_I32BE, a 32-bit Big Endian integer, is an HDF atomic datatype. + +\subsection subsecLBDsetCreateProgDDL Dataset Definition in DDL +The following is the simplified DDL dataset definition: +\code + ::= DATASET "" { + + + * } + + ::= DATATYPE { } + + ::= DATASPACE { SIMPLE / } + + ::= +\endcode + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBDsetRW Reading From and Writing To a Dataset +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBDsetRW Dataset I/O Operation +During a dataset I/O operation, the library transfers raw data between memory and the file. The data in memory +can have a datatype different from that of the file and can also be of a different size (i.e., the data in +memory is a subset of the dataset elements, or vice versa). Therefore, to perform read or write operations, +the application program must specify: +\li The dataset +\li The dataset's datatype in memory +\li The dataset's dataspace in memory +\li The dataset's dataspace in the file +\li The dataset transfer property list
+
    +
  • (The dataset transfer property list controls various aspects of the I/O operations, such as the number +of processes participating in a collective I/O request or hints to the library to control caching of raw +data. In this tutorial, we use the default dataset transfer property list.)
  • +
+\li The data buffer + +The steps to read from or write to a dataset are as follows: +
    +
  1. Obtain the dataset identifier.
  2. +
  3. Specify the memory datatype.
  4. +
  5. Specify the memory dataspace.
  6. +
  7. Specify the file dataspace.
  8. +
  9. Specify the transfer properties.
  10. +
  11. Perform the desired operation on the dataset.
  12. +
  13. Close the dataset.
  14. +
  15. Close the dataspace, datatype, and property list if necessary.
  16. +
+ +To read from or write to a dataset, the #H5Dread and #H5Dwrite routines are used. + +C +\code + status = H5Dread (set_id, mem_type_id, mem_space_id, file_space_id, xfer_prp, buf ); + status = H5Dwrite (set_id, mem_type_id, mem_space_id, file_space_id, xfer_prp, buf); +\endcode + +Fortran +\code + CALL h5dread_f(dset_id, mem_type_id, buf, dims, error, & + mem_space_id=mspace_id, file_space_id=fspace_id, & + xfer_prp=xfer_plist_id) + or + CALL h5dread_f(dset_id, mem_type_id, buf, dims, error) + + + CALL h5dwrite_f(dset_id, mem_type_id, buf, dims, error, & + mem_space_id=mspace_id, file_space_id=fspace_id, & + xfer_prp=xfer_plist_id) + or + CALL h5dwrite_f(dset_id, mem_type_id, buf, dims, error) +\endcode + +\section secLBDsetRWHL High Level APIs +The High Level \ref H5LT include functions that simplify and condense the steps for creating and +reading datasets. Please be sure to review them, in addition to this tutorial. + +\section secLBDsetRWEx Programming Example + +\subsection secLBDsetRWExDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example shows how to read and write an existing dataset. It opens the file created in the previous example, +obtains the dataset identifier for the dataset /dset, writes the dataset to the file, then reads +the dataset back. It then closes the dataset and file. + +Note that #H5S_ALL is passed in for both the memory and file dataspace parameters in the read and write calls. +This indicates that the entire dataspace of the dataset will be read or written to. #H5S_ALL by itself does not +necessarily have this meaning. See the \ref RM entry for #H5Dread or #H5Dwrite for more information on using #H5S_ALL. + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection secLBDsetRWExRem Remarks +#H5Fopen opens an existing file and returns a file identifier. + +#H5Dopen opens an existing dataset with the specified name and location. 
+ +#H5Dwrite writes raw data from an application buffer to the specified dataset, converting from the datatype and +dataspace of the dataset in memory to the datatype and dataspace of the dataset in the file. Specifying #H5S_ALL +for both the memory and file dataspaces indicates that the entire dataspace of the dataset is to be written to. +#H5S_ALL by itself does not necessarily have this meaning. See the \ref RM entry for #H5Dwrite for more information +on using #H5S_ALL. + +#H5Dread reads raw data from the specified dataset to an application buffer, converting from the file datatype and +dataspace to the memory datatype and dataspace. Specifying #H5S_ALL for both the memory and file dataspaces +indicates that the entire dataspace of the dataset is to be read. #H5S_ALL by itself does not necessarily have +this meaning. See the \ref RM entry for #H5Dread for more information on using #H5S_ALL. + +\subsection secLBDsetRWExCont File Contents + +Shown below is the contents of dset.h5 (created by the C program). + +dset.h5 in DDL +\code + HDF5 "dset.h5" { + GROUP "/" { + DATASET "dset" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 4, 6 ) / ( 4, 6 ) } + DATA { + 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24 + } + } + } + } +\endcode + +Shown below is the contents of dsetf.h5 (created by the FORTRAN program). + +dsetf.h5 in DDL +\code + HDF5 "dsetf.h5" { + GROUP "/" { + DATASET "dset" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 6, 4 ) / ( 6, 4 ) } + DATA { + 1, 7, 13, 19, + 2, 8, 14, 20, + 3, 9, 15, 21, + 4, 10, 16, 22, + 5, 11, 17, 23, + 6, 12, 18, 24 + } + } + } + } +\endcode + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBAttrCreate Creating an Attribute +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +Attributes are small datasets that can be used to describe the nature and/or the intended usage of +the object they are attached to. In this section, we show how to create, read, and write an attribute. + +\section secLBAttrCreate Creating an attribute +Creating an attribute is similar to creating a dataset. To create an attribute, the application must +specify the object which the attribute is attached to, the datatype and dataspace of the attribute +data, and the attribute creation property list. + +The steps to create an attribute are as follows: +
    +
  1. Obtain the object identifier that the attribute is to be attached to.
  2. +
  3. Define the characteristics of the attribute and specify the attribute creation property list. +
      +
    • Define the datatype.
    • +
    • Define the dataspace.
    • +
    • Specify the attribute creation property list.
    • +
  4. +
  5. Create the attribute.
  6. +
  7. Close the attribute and datatype, dataspace, and attribute creation property list, if necessary.
  8. +
+ +To create and close an attribute, the calling program must use #H5Acreate and #H5Aclose. For example: + +C +\code + attr_id = H5Acreate (dataset_id, "Units", H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, H5P_DEFAULT) + status = H5Aclose (attr_id); +\endcode + +Fortran +\code + CALL h5acreate_f (dset_id, attr_nam, type_id, space_id, attr_id, & + hdferr, creation_prp=creat_plist_id) + or + CALL h5acreate_f (dset_id, attr_nam, type_id, space_id, attr_id, hdferr) + + CALL h5aclose_f (attr_id, hdferr) +\endcode + +\section secLBAttrCreateRW Reading/Writing an attribute +Attributes may only be read or written as an entire object; no partial I/O is supported. Therefore, +to perform I/O operations on an attribute, the application needs only to specify the attribute and +the attribute's memory datatype. + +The steps to read or write an attribute are as follows. +
    +
  1. Obtain the attribute identifier.
  2. +
  3. Specify the attribute's memory datatype.
  4. +
  5. Perform the desired operation.
  6. +
  7. Close the memory datatype if necessary.
  8. +
+ +To read and/or write an attribute, the calling program must contain the #H5Aread and/or +#H5Awrite routines. For example: + +C +\code + status = H5Aread (attr_id, mem_type_id, buf); + status = H5Awrite (attr_id, mem_type_id, buf); +\endcode + +Fortran +\code + CALL h5awrite_f (attr_id, mem_type_id, buf, dims, hdferr) + CALL h5aread_f (attr_id, mem_type_id, buf, dims, hdferr) +\endcode + +\section secLBAttrCreateHL High Level APIs +The High Level \ref H5LT include functions that simplify and condense the steps for creating and +reading datasets. Please be sure to review them, in addition to this tutorial. + +\section secLBAttrCreateRWEx Programming Example + +\subsection secLBAttrCreateRWExDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example shows how to create and write a dataset attribute. It opens an existing file dset.h5 +in C (dsetf.h5 in FORTRAN), obtains the identifier of the dataset /dset, defines +the attribute's dataspace, creates the dataset attribute, writes the attribute, and then closes the attribute's +dataspace, attribute, dataset, and file. + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection secLBAttrCreateRWExRem Remarks +#H5Acreate creates an attribute which is attached to the object specified by the first parameter, and returns an identifier. + +#H5Awrite writes the entire attribute, and returns the status of the write. + +When an attribute is no longer accessed by a program, #H5Aclose must be called to release the attribute from use. +An #H5Aclose/h5aclose_f call is mandatory. + +\subsection secLBAttrCreateRWExCont File Contents + +Shown below is the contents and the attribute definition of dset.h5 (created by the C program). 
+ +dset.h5 in DDL +\code +HDF5 "dset.h5" { +GROUP "/" { +DATASET "dset" { +DATATYPE { H5T_STD_I32BE } +DATASPACE { SIMPLE ( 4, 6 ) / ( 4, 6 ) } +DATA { + 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24 +} +ATTRIBUTE "attr" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 2 ) / ( 2 ) } + DATA { + 100, 200 + } +} +} +} +} +\endcode + +Shown below is the contents and the attribute definition of dsetf.h5 (created by the FORTRAN program). + +dsetf.h5 in DDL +\code +HDF5 "dsetf.h5" { +GROUP "/" { +DATASET "dset" { +DATATYPE { H5T_STD_I32BE } +DATASPACE { SIMPLE ( 6, 4 ) / ( 6, 4 ) } +DATA { + 1, 7, 13, 19, + 2, 8, 14, 20, + 3, 9, 15, 21, + 4, 10, 16, 22, + 5, 11, 17, 23, + 6, 12, 18, 24 +} +ATTRIBUTE "attr" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 2 ) / ( 2 ) } + DATA { + 100, 200 + } +} +} +} +} +\endcode + +\subsection secLBAttrCreateRWExDDL Attribute Definition in DDL + +HDF5 Attribute Definition +\code + ::= ATTRIBUTE "" { + + } +\endcode + +
+ +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +*/ diff --git a/doxygen/dox/LearnBasics2.dox b/doxygen/dox/LearnBasics2.dox new file mode 100644 index 0000000..ffcb971 --- /dev/null +++ b/doxygen/dox/LearnBasics2.dox @@ -0,0 +1,1159 @@ +/** @page LBGrpCreate Creating a Group +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBGrpCreate Creating a group +An HDF5 group is a structure containing zero or more HDF5 objects. The two primary HDF5 objects are groups and datasets. To create a group, the calling program must: +
    +
  1. Obtain the location identifier where the group is to be created.
  2. +
  3. Create the group.
  4. +
  5. Close the group.
  6. +
+ +To create a group, the calling program must call #H5Gcreate. +To close the group, #H5Gclose must be called. The close call is mandatory. + +For example: + +C +\code + group_id = H5Gcreate(file_id, "/MyGroup", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + status = H5Gclose (group_id); +\endcode + +Fortran +\code + CALL h5gcreate_f (loc_id, name, group_id, error) + CALL h5gclose_f (group_id, error) +\endcode + +\section secLBGrpCreateRWEx Programming Example + +\subsection secLBGrpCreateRWExDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example shows how to create and close a group. It creates a file called group.h5 in C +(groupf.h5 for FORTRAN), creates a group called MyGroup in the root group, and then closes the group and file. + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection secLBGrpCreateRWExCont File Contents + +Shown below is the contents and the definition of the group of group.h5 (created by the C program). +(The FORTRAN program creates the HDF5 file groupf.h5 and the resulting DDL shows the filename +groupf.h5 in the first line.) + + + + + +
The Contents of group.h5.
+\image html imggrpcreate.gif +
+ +group.h5 in DDL +\code +HDF5 "group.h5" { +GROUP "/" { + GROUP "MyGroup" { + } +} +} +\endcode + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBGrpCreateNames Creating Groups using Absolute and Relative Names +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +Recall that to create an HDF5 object, we have to specify the location where the object is to be created. +This location is determined by the identifier of an HDF5 object and the name of the object to be created. +The name of the created object can be either an absolute name or a name relative to the specified identifier. +In the previous example, we used the file identifier and the absolute name /MyGroup to create a group. + +In this section, we discuss HDF5 names and show how to use absolute and relative names. + +\section secLBGrpCreateNames Names +HDF5 object names are a slash-separated list of components. There are few restrictions on names: component +names may be any length except zero and may contain any character except slash (/) and the null terminator. +A full name may be composed of any number of component names separated by slashes, with any of the component +names being the special name . (a dot or period). A name which begins with a slash is an absolute name which +is accessed beginning with the root group of the file; all other names are relative names and the named +object is accessed beginning with the specified group. A special case is the name / (or equivalent) which +refers to the root group. + +Functions which operate on names generally take a location identifier, which can be either a file identifier +or a group identifier, and perform the lookup with respect to that location. Several possibilities are +described in the following table: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Location TypeObject NameDescription
File identifier/foo/barThe object bar in group foo in the root group.
Group identifier/foo/barThe object bar in group foo in the root group of the file containing the specified group. +In other words, the group identifier's only purpose is to specify a file.
File identifier/The root group of the specified file.
Group identifier/The root group of the file containing the specified group.
Group identifierfoo/barThe object bar in group foo in the specified group.
File identifier.The root group of the file.
Group identifier.The specified group.
Other identifier.The specified object.
+ +\section secLBGrpCreateNamesEx Programming Example + +\subsection secLBGrpCreateNamesExDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example code shows how to create groups using absolute and relative names. It creates three groups: the first two groups are created using +the file identifier and the group absolute names while the third group is created using a group identifier and a name relative to the specified group. + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection secLBGrpCreateNamesExRem Remarks +#H5Gcreate creates a group at the location specified by a location identifier and a name. The location identifier +can be a file identifier or a group identifier and the name can be relative or absolute. + +The first #H5Gcreate/h5gcreate_f creates the group MyGroup in the root group of the specified file. + +The second #H5Gcreate/h5gcreate_f creates the group Group_A in the group MyGroup in the root group of the specified +file. Note that the parent group (MyGroup) already exists. + +The third #H5Gcreate/h5gcreate_f creates the group Group_B in the specified group. + +\subsection secLBGrpCreateNamesExCont File Contents + +Shown below is the contents and the definition of the group of groups.h5 (created by the C program). +(The FORTRAN program creates the HDF5 file groupsf.h5 and the resulting DDL shows the filename +groupsf.h5 in the first line.) + + + + + +
The contents of groups.h5.
+\image html imggrps.gif +
+ +groups.h5 in DDL +\code +HDF5 "groups.h5" { +GROUP "/" { + GROUP "MyGroup" { + GROUP "Group_A" { + } + GROUP "Group_B" { + } + } +} +} +\endcode + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBGrpDset Creating Datasets in Groups +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBGrpDset Datasets in Groups +We have shown how to create groups, datasets, and attributes. In this section, we show how to create +datasets in groups. Recall that #H5Dcreate creates a dataset at the location specified by a location +identifier and a name. Similar to #H5Gcreate, the location identifier can be a file identifier or a +group identifier and the name can be relative or absolute. The location identifier and the name +together determine the location where the dataset is to be created. If the location identifier and +name refer to a group, then the dataset is created in that group. + +\section secLBGrpDsetEx Programming Example + +\subsection secLBGrpDsetExDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example shows how to create a dataset in a particular group. It opens the file created in the previous example and creates two datasets: + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection secLBGrpDsetExCont File Contents + +Shown below is the contents and the definition of the group of groups.h5 (created by the C program). +(The FORTRAN program creates the HDF5 file groupsf.h5 and the resulting DDL shows the filename +groupsf.h5 in the first line.) + + + + + +
The contents of the file groups.h5 (groupsf.h5 for FORTRAN)
+\image html imggrpdsets.gif +
+ +groups.h5 in DDL +\code +HDF5 "groups.h5" { +GROUP "/" { +GROUP "MyGroup" { +GROUP "Group_A" { + DATASET "dset2" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 2, 10 ) / ( 2, 10 ) } + DATA { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 + } + } +} +GROUP "Group_B" { +} +DATASET "dset1" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 3, 3 ) / ( 3, 3 ) } + DATA { + 1, 2, 3, + 1, 2, 3, + 1, 2, 3 + } +} +} +} +} +\endcode + +groupsf.h5 in DDL +\code +HDF5 "groupsf.h5" { +GROUP "/" { +GROUP "MyGroup" { +GROUP "Group_A" { + DATASET "dset2" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 10, 2 ) / ( 10, 2 ) } + DATA { + 1, 1, + 2, 2, + 3, 3, + 4, 4, + 5, 5, + 6, 6, + 7, 7, + 8, 8, + 9, 9, + 10, 10 + } + } +} +GROUP "Group_B" { +} +DATASET "dset1" { + DATATYPE { H5T_STD_I32BE } + DATASPACE { SIMPLE ( 3, 3 ) / ( 3, 3 ) } + DATA { + 1, 1, 1, + 2, 2, 2, + 3, 3, 3 + } +} +} +} +} +\endcode + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBDsetSubRW Reading From or Writing To a Subset of a Dataset +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBDsetSubRW Dataset Subsets +There are two ways that you can select a subset in an HDF5 dataset and read or write to it: +
  • +Hyperslab Selection: The #H5Sselect_hyperslab call selects a logically contiguous +collection of points in a dataspace, or a regular pattern of points or blocks in a dataspace. +
  • +Element Selection: The #H5Sselect_elements call selects elements in an array. +
+ +HDF5 allows you to read from or write to a portion or subset of a dataset by: +\li Selecting a Subset of the Dataset's Dataspace, +\li Selecting a Memory Dataspace, +\li Reading From or Writing to a Dataset Subset. + +\section secLBDsetSubRWSel Selecting a Subset of the Dataset's Dataspace +First you must obtain the dataspace of a dataset in a file by calling #H5Dget_space. + +Then select a subset of that dataspace by calling #H5Sselect_hyperslab. The offset, count, stride +and block parameters of this API define the shape and size of the selection. They must be arrays +with the same number of dimensions as the rank of the dataset's dataspace. These arrays ALL work +together to define a selection. A change to one of these arrays can affect the others. +\li \em offset: An array that specifies the offset of the starting element of the specified hyperslab. +\li \em count: An array that determines how many blocks to select from the dataspace in each dimension. If the block +size for a dimension is one then the count is the number of elements along that dimension. +\li \em stride: An array that allows you to sample elements along a dimension. For example, a stride of one (or NULL) +will select every element along a dimension, a stride of two will select every other element, and a stride of three +will select an element after every two elements. +\li \em block: An array that determines the size of the element block selected from a dataspace. If the block size +is one or NULL then the block size is a single element in that dimension. + +\section secLBDsetSubRWMem Selecting a Memory Dataspace +You must select a memory dataspace in addition to a file dataspace before you can read a subset from or write a subset +to a dataset. A memory dataspace can be specified by calling #H5Screate_simple. + +The memory dataspace passed to the read or write call must contain the same number of elements as the file dataspace. 
+The number of elements in a dataspace selection can be determined with the #H5Sget_select_npoints API. + +\section secLBDsetSubRWSub Reading From or Writing To a Dataset Subset +To read from or write to a dataset subset, the #H5Dread and #H5Dwrite routines are used. The memory and file dataspace +identifiers from the selections that were made are passed into the read or write call. For example (C): +\code + status = H5Dwrite (.., .., memspace_id, dataspace_id, .., ..); +\endcode + +\section secLBDsetSubRWProg Programming Example + +\subsection subsecLBDsetSubRWProgDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example creates an 8 x 10 integer dataset in an HDF5 file. It then selects and writes to a 3 x 4 subset +of the dataset created with the dimensions offset by 1 x 2. (If using Fortran, the dimensions will be swapped. +The dataset will be 10 x 8, the subset will be 4 x 3, and the offset will be 2 x 1.) + +PLEASE NOTE that the examples and images below were created using C. + +The following image shows the dataset that gets written originally, and the subset of data that gets modified +afterwards. Dimension 0 is vertical and Dimension 1 is horizontal as shown below: + + + + +
+\image html LBDsetSubRWProg.png +
+ +The subset on the right above is created using these values for offset, count stride, and block: +\code +offset = {1, 2} + +count = {3, 4} + +stride = {1, 1} + +block = {1, 1} +\endcode + +\subsection subsecLBDsetSubRWProgExper Experiments with Different Selections +Following are examples of changes that can be made to the example code provided to better understand +how to make selections. + +\subsubsection subsubsecLBDsetSubRWProgExperOne Example 1 +By default the example code will select and write to a 3 x 4 subset. You can modify the count +parameter in the example code to select a different subset, by changing the value of +DIM0_SUB (C, C++) / dim0_sub (Fortran) near the top. Change its value to 7 to create a 7 x 4 subset: + + + + +
+\image html imgLBDsetSubRW11.png +
+ +If you were to change the subset to 8 x 4, the selection would be beyond the extent of the dimension: + + + + +
+\image html imgLBDsetSubRW12.png +
+ +The write will fail with the error: "file selection+offset not within extent" + +\subsubsection subsubsecLBDsetSubRWProgExperTwo Example 2 +In the example code provided, the memory and file dataspaces passed to the H5Dwrite call have the +same size, 3 x 4 (DIM0_SUB x DIM1_SUB). Change the size of the memory dataspace to be 4 x 4 so that +they do not match, and then compile: +\code + dimsm[0] = DIM0_SUB + 1; + dimsm[1] = DIM1_SUB; + memspace_id = H5Screate_simple (RANK, dimsm, NULL); +\endcode +The code will fail with the error: "src and dest data spaces have different sizes" + +How many elements are in the memory and file dataspaces that were specified above? Add these lines: +\code + hssize_t size; + + /* Just before H5Dwrite call the following */ + size = H5Sget_select_npoints (memspace_id); + printf ("\nmemspace_id size: %i\n", size); + size = H5Sget_select_npoints (dataspace_id); + printf ("dataspace_id size: %i\n", size); +\endcode + +You should see these lines followed by the error: +\code + memspace_id size: 16 + dataspace_id size: 12 +\endcode + +\subsubsection subsubsecLBDsetSubRWProgExperThree Example 3 +This example shows the selection that occurs if changing the values of the offset, count, +stride and block parameters in the example code. + +This will select two blocks. The count array specifies the number of blocks. The block array +specifies the size of a block. The stride must be modified to accommodate the block size. + + + + +
+\image html imgLBDsetSubRW31.png +
+ +Now try modifying the count as shown below. The write will fail because the selection goes beyond the extent of the dimension: + + + + +
+\image html imgLBDsetSubRW32.png +
+ +If the offset were 1x1 (instead of 1x2), then the selection can be made: + + + + +
+\image html imgLBDsetSubRW33.png +
+ +The selections above were tested with the +
h5_subsetbk.c +example code. The memory dataspace was defined as one-dimensional. + +\subsection subsecLBDsetSubRWProgRem Remarks +\li In addition to #H5Sselect_hyperslab, this example introduces the #H5Dget_space call to obtain the dataspace of a dataset. +\li If using the default values for the stride and block parameters of #H5Sselect_hyperslab, then, for C you can specify NULL +for these parameters, rather than passing in an array for each, and for Fortran 90 you can omit these parameters. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBDatatypes Datatype Basics +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBDtype What is a Datatype? +A datatype is a collection of datatype properties which provide complete information for data conversion to or from that datatype. + +Datatypes in HDF5 can be grouped as follows: +\li Pre-Defined Datatypes: These are datatypes that are created by HDF5. They are actually opened +(and closed) by HDF5, and can have a different value from one HDF5 session to the next. +\li Derived Datatypes: These are datatypes that are created or derived from the pre-defined datatypes. +Although created from pre-defined types, they represent a category unto themselves. An example of a commonly used derived +datatype is a string of more than one character. + +\section secLBDtypePre Pre-defined Datatypes +The properties of pre-defined datatypes are: +\li Pre-defined datatypes are opened and closed by HDF5. +\li A pre-defined datatype is a handle and is NOT PERSISTENT. Its value can be different from one HDF5 session to the next. +\li Pre-defined datatypes are Read-Only. +\li As mentioned, other datatypes can be derived from pre-defined datatypes. + +There are two types of pre-defined datatypes, standard (file) and native. + +

Standard

+A standard (or file) datatype can be: +
    +
  • Atomic: A datatype which cannot be decomposed into smaller datatype units at the API level. +The atomic datatypes are: +
      +
    • integer
    • +
    • float
    • +
    • string (1-character)
    • +
    • date and time
    • +
    • bitfield
    • +
    • reference
    • +
    • opaque
    • +
    +
  • +
  • Composite: An aggregation of one or more datatypes. +Composite datatypes include: +
      +
    • array
    • +
    • variable length
    • +
    • enumeration
    • +
    • compound datatypes
    • +
    +Array, variable length, and enumeration datatypes are defined in terms of a single atomic datatype, +whereas a compound datatype is a datatype composed of a sequence of datatypes. +
  • +
+ + + + + + + + +
Notes
+\li Standard pre-defined datatypes are the SAME on all platforms. +\li They are the datatypes that you see in an HDF5 file. +\li They are typically used when creating a dataset. +
+ +

Native

+Native pre-defined datatypes are used for memory operations, such as reading and writing. They are +NOT THE SAME on different platforms. They are similar to C type names, and are aliased to the +appropriate HDF5 standard pre-defined datatype for a given platform. + +For example, when on an Intel based PC, #H5T_NATIVE_INT is aliased to the standard pre-defined type, +#H5T_STD_I32LE. On a MIPS machine, it is aliased to #H5T_STD_I32BE. + + + + + + + +
Notes
+\li Native datatypes are NOT THE SAME on all platforms. +\li Native datatypes simplify memory operations (read/write). The HDF5 library automatically converts as needed. +\li Native datatypes are NOT in an HDF5 File. The standard pre-defined datatype that a native datatype corresponds +to is what you will see in the file. +
+ +

Pre-Defined

+The following table shows the native types and the standard pre-defined datatypes they correspond +to. (Keep in mind that HDF5 can convert between datatypes, so you can specify a buffer of a larger +type for a dataset of a given type. For example, you can read a dataset that has a short datatype +into a long integer buffer.) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Some HDF5 pre-defined native datatypes and corresponding standard (file) type
C TypeHDF5 Memory TypeHDF5 File Type*
Integer
int#H5T_NATIVE_INT#H5T_STD_I32BE or #H5T_STD_I32LE
short#H5T_NATIVE_SHORT#H5T_STD_I16BE or #H5T_STD_I16LE
long#H5T_NATIVE_LONG#H5T_STD_I32BE, #H5T_STD_I32LE, + #H5T_STD_I64BE or #H5T_STD_I64LE
long long#H5T_NATIVE_LLONG#H5T_STD_I64BE or #H5T_STD_I64LE
unsigned int#H5T_NATIVE_UINT#H5T_STD_U32BE or #H5T_STD_U32LE
unsigned short#H5T_NATIVE_USHORT#H5T_STD_U16BE or #H5T_STD_U16LE
unsigned long#H5T_NATIVE_ULONG#H5T_STD_U32BE, #H5T_STD_U32LE, + #H5T_STD_U64BE or #H5T_STD_U64LE
unsigned long long#H5T_NATIVE_ULLONG#H5T_STD_U64BE or #H5T_STD_U64LE
Float
float#H5T_NATIVE_FLOAT#H5T_IEEE_F32BE or #H5T_IEEE_F32LE
double#H5T_NATIVE_DOUBLE#H5T_IEEE_F64BE or #H5T_IEEE_F64LE
+ + + + + + + + + + + + + + + + + + + + + + + +
Some HDF5 Fortran pre-defined native datatypes and corresponding standard (file) type
F90 TypeHDF5 Memory TypeHDF5 File Type*
integerH5T_NATIVE_INTEGER#H5T_STD_I32BE(8,16) or #H5T_STD_I32LE(8,16)
realH5T_NATIVE_REAL#H5T_IEEE_F32BE or #H5T_IEEE_F32LE
double-precision#H5T_NATIVE_DOUBLE#H5T_IEEE_F64BE or #H5T_IEEE_F64LE
+ + + + + +
* Note that the HDF5 File Types listed are those that are most commonly created. + The file type created depends on the compiler switches and platforms being + used. For example, on the Cray an integer is 64-bit, and using #H5T_NATIVE_INT (C) + or H5T_NATIVE_INTEGER (F90) would result in an #H5T_STD_I64BE file type.
+ +The following code is an example of when you would use standard pre-defined datatypes vs. native types: +\code + #include "hdf5.h" + + main() { + + hid_t file_id, dataset_id, dataspace_id; + herr_t status; + hsize_t dims[2]={4,6}; + int i, j, dset_data[4][6]; + + for (i = 0; i < 4; i++) + for (j = 0; j < 6; j++) + dset_data[i][j] = i * 6 + j + 1; + + file_id = H5Fcreate ("dtypes.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + + dataspace_id = H5Screate_simple (2, dims, NULL); + + dataset_id = H5Dcreate (file_id, "/dset", H5T_STD_I32BE, dataspace_id, + H5P_DEFAULT); + + status = H5Dwrite (dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, + H5P_DEFAULT, dset_data); + + status = H5Dclose (dataset_id); + + status = H5Fclose (file_id); + } +\endcode +By using the native types when reading and writing, the code that reads from or writes to a dataset +can be the same for different platforms. + +Can native types also be used when creating a dataset? Yes. However, just be aware that the resulting +datatype in the file will be one of the standard pre-defined types and may be different than expected. + +What happens if you do not use the correct native datatype for a standard (file) datatype? Your data +may be incorrect or not what you expect. + +\section secLBDtypeDer Derived Datatypes +ANY pre-defined datatype can be used to derive user-defined datatypes. + +To create a datatype derived from a pre-defined type: +
    +
  1. Make a copy of the pre-defined datatype: +\code + tid = H5Tcopy (H5T_STD_I32BE); +\endcode +
  2. +
  3. Change the datatype.
  4. +
+ +There are numerous datatype functions that allow a user to alter a pre-defined datatype. See +\ref subsecLBDtypeSpecStr below for a simple example. + +Refer to the \ref H5T in the \ref RM. Example functions are #H5Tset_size and #H5Tset_precision. + +\section secLBDtypeSpec Specific Datatypes +On the Examples by API +page under Datatypes +you will find many example programs for creating and reading datasets with different datatypes. + +Below is additional information on some of the datatypes. See +the Examples by API +page for examples of these datatypes. + +\subsection subsecLBDtypeSpec Array Datatype vs Array Dataspace +#H5T_ARRAY is a datatype, and it should not be confused with the dataspace of a dataset. The dataspace +of a dataset can consist of a regular array of elements. For example, the datatype for a dataset +could be an atomic datatype like integer, and the dataset could be an N-dimensional appendable array, +as specified by the dataspace. See #H5Screate and #H5Screate_simple for details. + +Unlimited dimensions and subsetting are not supported when using the #H5T_ARRAY datatype. + +The #H5T_ARRAY datatype was primarily created to address the simple case of a compound datatype +when all members of the compound datatype are of the same type and there is no need to subset by +compound datatype members. Creation of such a datatype is more efficient and I/O also requires +less work, because there is no alignment involved. + +\subsection subsecLBDtypeSpecArr Array Datatype +The array class of datatypes, #H5T_ARRAY, allows the construction of true, homogeneous, +multi-dimensional arrays. Since these are homogeneous arrays, each element of the array +will be of the same datatype, designated at the time the array is created. + +Users may be confused by this datatype, as opposed to a dataset with a simple atomic +datatype (eg. integer) that is an array. See subsecLBDtypeSpec for more information. + +Arrays can be nested. 
Not only is an array datatype used as an element of an HDF5 dataset, +but the elements of an array datatype may be of any datatype, including another array datatype. + +Array datatypes cannot be subdivided for I/O; the entire array must be transferred from one +dataset to another. + +Within certain limitations, outlined in the next paragraph, array datatypes may be N-dimensional +and of any dimension size. Unlimited dimensions, however, are not supported. Functionality similar +to unlimited dimension arrays is available through the use of variable-length datatypes. + +The maximum number of dimensions, i.e., the maximum rank, of an array datatype is specified by +the HDF5 library constant #H5S_MAX_RANK. The minimum rank is 1 (one). All dimension sizes must +be greater than 0 (zero). + +One array datatype may only be converted to another array datatype if the number of dimensions +and the sizes of the dimensions are equal and the datatype of the first array's elements can be +converted to the datatype of the second array's elements. + +\subsubsection subsubsecLBDtypeSpecArrAPI Array Datatype APIs +There are three functions that are specific to array datatypes: one, #H5Tarray_create, for creating +an array datatype, and two, #H5Tget_array_ndims and #H5Tget_array_dims +for working with existing array datatypes. + +

Creating

+The function #H5Tarray_create creates a new array datatype object. Parameters specify +\li the base datatype of each element of the array, +\li the rank of the array, i.e., the number of dimensions, +\li the size of each dimension, and +\li the dimension permutation of the array, i.e., whether the elements of the array are listed in C or FORTRAN order. + +

Working with existing array datatypes

+
+When working with existing arrays, one must first determine the rank, or number of dimensions, of the array.
+
+The function #H5Tget_array_ndims returns the rank of a specified array datatype.
+
+In many instances, one needs further information. The function #H5Tget_array_dims retrieves the
+permutation of the array and the size of each dimension.
+
+\subsection subsecLBDtypeSpecCmpd Compound
+
+\subsubsection subsubsecLBDtypeSpecCmpdProp Properties of compound datatypes
+A compound datatype is similar to a struct in C or a common block in Fortran. It is a collection of
+one or more atomic types or small arrays of such types. To create and use a compound datatype
+you need to refer to various properties of the compound datatype:
+\li It is of class compound.
+\li It has a fixed total size, in bytes.
+\li It consists of zero or more members (defined in any order) with unique names and which occupy non-overlapping regions within the datum.
+\li Each member has its own datatype.
+\li Each member is referenced by an index number between zero and N-1, where N is the number of members in the compound datatype.
+\li Each member has a name which is unique among its siblings in a compound datatype.
+\li Each member has a fixed byte offset, which is the first byte (smallest byte address) of that member in a compound datatype.
+\li Each member can be a small array of up to four dimensions.
+
+Properties of members of a compound datatype are defined when the member is added to the compound type and cannot be subsequently modified.
+
+\subsubsection subsubsecLBDtypeSpecCmpdDef Defining compound datatypes
+Compound datatypes must be built out of other datatypes. First, one creates an empty compound
+datatype and specifies its total size. Then members are added to the compound datatype in any order.
+
+Member names. Each member must have a descriptive name, which is the key used to uniquely identify
+the member within the compound datatype. 
A member name in an HDF5 datatype does not necessarily +have to be the same as the name of the corresponding member in the C struct in memory, although +this is often the case. Nor does one need to define all members of the C struct in the HDF5 +compound datatype (or vice versa). + +Offsets. Usually a C struct will be defined to hold a data point in memory, and the offsets of the +members in memory will be the offsets of the struct members from the beginning of an instance of the +struct. The library defines the macro to compute the offset of a member within a struct: +\code + HOFFSET(s,m) +\endcode +This macro computes the offset of member m within a struct variable s. + +Here is an example in which a compound datatype is created to describe complex numbers whose type +is defined by the complex_t struct. +\code +typedef struct { + double re; /*real part */ + double im; /*imaginary part */ +} complex_t; + +complex_t tmp; /*used only to compute offsets */ +hid_t complex_id = H5Tcreate (H5T_COMPOUND, sizeof tmp); +H5Tinsert (complex_id, "real", HOFFSET(tmp,re), H5T_NATIVE_DOUBLE); +H5Tinsert (complex_id, "imaginary", HOFFSET(tmp,im), H5T_NATIVE_DOUBLE); +\endcode + +\subsection subsecLBDtypeSpecRef Reference +There are two types of Reference datatypes in HDF5: +\li \ref subsubsecLBDtypeSpecRefObj +\li \ref subsubsecLBDtypeSpecRefDset + +\subsubsection subsubsecLBDtypeSpecRefObj Reference to objects +In HDF5, objects (i.e. groups, datasets, and named datatypes) are usually accessed by name. +There is another way to access stored objects -- by reference. + +An object reference is based on the relative file address of the object header in the file +and is constant for the life of the object. Once a reference to an object is created and +stored in a dataset in the file, it can be used to dereference the object it points to. +References are handy for creating a file index or for grouping related objects by storing +references to them in one dataset. + +

Creating and storing references to objects

+The following steps are involved in creating and storing file references to objects: +
    +
  1. Create the objects or open them if they already exist in the file.
  2. +
  3. Create a dataset to store the objects' references, by specifying #H5T_STD_REF_OBJ as the datatype
  4. +
  5. Create and store references to the objects in a buffer, using #H5Rcreate.
  6. +
  7. Write a buffer with the references to the dataset, using #H5Dwrite with the #H5T_STD_REF_OBJ datatype.
  8. +
+ +

Reading references and accessing objects using references

+The following steps are involved: +
    +
  1. Open the dataset with the references and read them. The #H5T_STD_REF_OBJ datatype must be used to describe the memory datatype.
  2. +
  3. Use the read reference to obtain the identifier of the object the reference points to using #H5Rdereference.
  4. +
  5. Open the dereferenced object and perform the desired operations.
  6. +
  7. Close all objects when the task is complete.
  8. +
+ +\subsubsection subsubsecLBDtypeSpecRefDset Reference to a dataset region +A dataset region reference points to a dataset selection in another dataset. +A reference to the dataset selection (region) is constant for the life of the dataset. + +

Creating and storing references to dataset regions

+The following steps are involved in creating and storing references to a dataset region: +\li Create a dataset to store the dataset region (selection), by passing in #H5T_STD_REF_DSETREG for the datatype when calling #H5Dcreate. +\li Create selection(s) in existing dataset(s) using #H5Sselect_hyperslab and/or #H5Sselect_elements. +\li Create reference(s) to the selection(s) using #H5Rcreate and store them in a buffer. +\li Write the references to the dataset regions in the file. +\li Close all objects. + +

Reading references to dataset regions

+The following steps are involved in reading references to dataset regions and referenced dataset regions (selections). +
    +
  1. Open and read the dataset containing references to the dataset regions. +The datatype #H5T_STD_REF_DSETREG must be used during read operation.
  2. +
  3. Use #H5Rdereference to obtain the dataset identifier from the read dataset region reference. + OR + Use #H5Rget_region to obtain the dataspace identifier for the dataset containing the selection from the read dataset region reference. +
  4. +
  5. With the dataspace identifier, the \ref H5S interface functions, H5Sget_select_*, +can be used to obtain information about the selection.
  6. +
  7. Close all objects when they are no longer needed.
  8. +
+ +The dataset with the region references was read by #H5Dread with the #H5T_STD_REF_DSETREG datatype specified. + +The read reference can be used to obtain the dataset identifier by calling #H5Rdereference or by obtaining +obtain spacial information (dataspace and selection) with the call to #H5Rget_region. + +The reference to the dataset region has information for both the dataset itself and its selection. In both functions: +\li The first parameter is an identifier of the dataset with the region references. +\li The second parameter specifies the type of reference stored. In this example, a reference to the dataset region is stored. +\li The third parameter is a buffer containing the reference of the specified type. + +This example introduces several H5Sget_select_* functions used to obtain information about selections: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
H5Sget_select_* functions used to obtain information about selections
FunctionDescription
#H5Sget_select_npointsReturns the number of elements in the hyperslab
#H5Sget_select_hyper_nblocksReturns the number of blocks in the hyperslab
#H5Sget_select_hyper_blocklistReturns the "lower left" and "upper right" coordinates of the blocks in the hyperslab selection
#H5Sget_select_boundsReturns the coordinates of the "minimal" block containing a hyperslab selection
#H5Sget_select_elem_npointsReturns the number of points in the element selection
#H5Sget_select_elem_pointlistReturns the coordinates of points in the element selection
+ +\subsection subsecLBDtypeSpecStr String +A simple example of creating a derived datatype is using the string datatype, +#H5T_C_S1 (#H5T_FORTRAN_S1) to create strings of more than one character. Strings +can be stored as either fixed or variable length, and may have different rules +for padding of unused storage. + +\subsubsection subsecLBDtypeSpecStrFix Fixed Length 5-character String Datatype +\code + hid_t strtype; /* Datatype ID */ + herr_t status; + + strtype = H5Tcopy (H5T_C_S1); + status = H5Tset_size (strtype, 5); /* create string of length 5 */ +\endcode + +\subsubsection subsecLBDtypeSpecStrVar Variable Length String Datatype +\code + strtype = H5Tcopy (H5T_C_S1); + status = H5Tset_size (strtype, H5T_VARIABLE); +\endcode + +The ability to derive datatypes from pre-defined types allows users to create any number of datatypes, +from simple to very complex. + +As the term implies, variable length strings are strings of varying lengths. They are stored internally +in a heap, potentially impacting efficiency in the following ways: +\li Heap storage requires more space than regular raw data storage. +\li Heap access generally reduces I/O efficiency because it requires individual read or write operations +for each data element rather than one read or write per dataset or per data selection. +\li A variable length dataset consists of pointers to the heaps of data, not the actual data. Chunking +and filters, including compression, are not available for heaps. + +See \ref subsubsec_datatype_other_strings in the \ref UG, for more information on how fixed and variable +length strings are stored. + +\subsection subsecLBDtypeSpecVL Variable Length +Variable-length (VL) datatypes are sequences of an existing datatype (atomic, VL, or compound) +which are not fixed in length from one dataset location to another. 
In essence, they are similar +to C character strings -- a sequence of a type which is pointed to by a particular type of +pointer -- although they are implemented more closely to FORTRAN strings by including an explicit +length in the pointer instead of using a particular value to terminate the sequence. + +VL datatypes are useful to the scientific community in many different ways, some of which are listed below: +
    +
  • Ragged arrays: Multi-dimensional ragged arrays can be implemented with the last (fastest changing) +dimension being ragged by using a VL datatype as the type of the element stored. (Or as a field in a compound datatype.) +
  • +
  • Fractal arrays: If a compound datatype has a VL field of another compound type with VL fields +(a nested VL datatype), this can be used to implement ragged arrays of ragged arrays, to whatever +nesting depth is required for the user. +
  • +
  • Polygon lists: A common storage requirement is to efficiently store arrays of polygons with +different numbers of vertices. VL datatypes can be used to efficiently and succinctly describe an +array of polygons with different numbers of vertices. +
  • +
  • Character strings: Perhaps the most common use of VL datatypes will be to store C-like VL character +strings in dataset elements or as attributes of objects. +
  • +
  • Indices: An array of VL object references could be used as an index to all the objects in a file +which contain a particular sequence of dataset values. Perhaps an array something like the following: +\code + Value1: Object1, Object3, Object9 + Value2: Object0, Object12, Object14, Object21, Object22 + Value3: Object2 + Value4: + Value5: Object1, Object10, Object12 + . + . +\endcode +
  • +
  • Object Tracking: An array of VL dataset region references can be used as a method of tracking +objects or features appearing in a sequence of datasets. Perhaps an array of them would look like: +\code + Feature1: Dataset1:Region, Dataset3:Region, Dataset9:Region + Feature2: Dataset0:Region, Dataset12:Region, Dataset14:Region, + Dataset21:Region, Dataset22:Region + Feature3: Dataset2:Region + Feature4: + Feature5: Dataset1:Region, Dataset10:Region, Dataset12:Region + . + . +\endcode +
  • +
+ +\subsubsection subsubsecLBDtypeSpecVLMem Variable-length datatype memory management +With each element possibly being of different sequence lengths for a dataset with a VL datatype, +the memory for the VL datatype must be dynamically allocated. Currently there are two methods +of managing the memory for VL datatypes: the standard C malloc/free memory allocation routines +or a method of calling user-defined memory management routines to allocate or free memory. Since +the memory allocated when reading (or writing) may be complicated to release, an HDF5 routine is +provided to traverse a memory buffer and free the VL datatype information without leaking memory. + +\subsubsection subsubsecLBDtypeSpecVLDiv Variable-length datatypes cannot be divided +VL datatypes are designed so that they cannot be subdivided by the library with selections, etc. +This design was chosen due to the complexities in specifying selections on each VL element of a +dataset through a selection API that is easy to understand. Also, the selection APIs work on +dataspaces, not on datatypes. At some point in time, we may want to create a way for dataspaces +to have VL components to them and we would need to allow selections of those VL regions, but +that is beyond the scope of this document. + +\subsubsection subsubsecLBDtypeSpecVLErr What happens if the library runs out of memory while reading? +It is possible for a call to #H5Dread to fail while reading in VL datatype information if the memory +required exceeds that which is available. In this case, the #H5Dread call will fail gracefully and any +VL data which has been allocated prior to the memory shortage will be returned to the system via the +memory management routines detailed below. It may be possible to design a partial read API function +at a later date, if demand for such a function warrants. 
+ +\subsubsection subsubsecLBDtypeSpecVLStr Strings as variable-length datatypes +Since character strings are a special case of VL data that is implemented in many different ways on +different machines and in different programming languages, they are handled somewhat differently from +other VL datatypes in HDF5. + +HDF5 has native VL strings for each language API, which are stored the same way on disk, but are +exported through each language API in a natural way for that language. When retrieving VL strings +from a dataset, users may choose to have them stored in memory as a native VL string or in HDF5's +#hvl_t struct for VL datatypes. + +VL strings may be created in one of two ways: by creating a VL datatype with a base type of +#H5T_C_S1, or by creating a string datatype and setting its length to #H5T_VARIABLE. The second method is used to access native VL strings in memory. The +library will convert between the two types, but they are stored on disk using different datatypes +and have different memory representations. + +Multi-byte character representations, such as \em UNICODE or \em wide characters in C/C++, will need the +appropriate character and string datatypes created so that they can be described properly through +the datatype API. Additional conversions between these types and the current ASCII characters +will also be required. + +Variable-width character strings (which might be compressed data or some other encoding) are not +currently handled by this design. We will evaluate how to implement them based on user feedback. + +\subsubsection subsubsecLBDtypeSpecVLAPIs Variable-length datatype APIs + +

Creation

+VL datatypes are created with the #H5Tvlen_create function as follows: +\code +type_id = H5Tvlen_create(hid_t base_type_id); +\endcode +The base datatype will be the datatype that the sequence is composed of, characters for character +strings, vertex coordinates for polygon lists, etc. The base datatype specified for the VL datatype +can be of any HDF5 datatype, including another VL datatype, a compound datatype, or an atomic datatype. + +

Querying base datatype of VL datatype

+It may be necessary to know the base datatype of a VL datatype before memory is allocated, etc. +The base datatype is queried with the #H5Tget_super function, described in the \ref H5T documentation. + +

Querying minimum memory required for VL information

+In order to predict the memory usage that #H5Dread may need to allocate to store VL data while +reading the data, the #H5Dvlen_get_buf_size function is provided: +\code +herr_t H5Dvlen_get_buf_size(hid_t dataset_id, hid_t type_id, hid_t space_id, hsize_t *size) +\endcode +This routine checks the number of bytes required to store the VL data from the dataset, using +the \em space_id for the selection in the dataset on disk and the \em type_id for the memory representation +of the VL data in memory. The *\em size value is modified according to how many bytes are required +to store the VL data in memory. + +

Specifying how to manage memory for the VL datatype

+The memory management method is determined by dataset transfer properties passed into the +#H5Dread and #H5Dwrite functions with the dataset transfer property list. + +Default memory management is set by using #H5P_DEFAULT for the dataset transfer +property list identifier. If #H5P_DEFAULT is used with #H5Dread, the system \em malloc and \em free +calls will be used for allocating and freeing memory. In such a case, #H5P_DEFAULT should +also be passed as the property list identifier to #H5Dvlen_reclaim. + +The rest of this subsection is relevant only to those who choose not to use default memory management. + +The user can choose whether to use the system \em malloc and \em free calls or user-defined, or custom, +memory management functions. If user-defined memory management functions are to be used, the +memory allocation and free routines must be defined via #H5Pset_vlen_mem_manager(), as follows: +\code +herr_t H5Pset_vlen_mem_manager(hid_t plist_id, H5MM_allocate_t alloc, void *alloc_info, H5MM_free_t free, void *free_info) +\endcode +The \em alloc and \em free parameters identify the memory management routines to be used. If the user +has defined custom memory management routines, \em alloc and/or \em free should be set to make those +routine calls (i.e., the name of the routine is used as the value of the parameter); if the user +prefers to use the system's \em malloc and/or \em free, the \em alloc and \em free parameters, respectively, should be set to \em NULL + +The prototypes for the user-defined functions would appear as follows: +\code +typedef void *(*H5MM_allocate_t)(size_t size, void *info) ; typedef void (*H5MM_free_t)(void *mem, void *free_info) ; +\endcode +The \em alloc_info and \em free_info parameters can be used to pass along any required information to +the user's memory management routines. 
+ +In summary, if the user has defined custom memory management routines, the name(s) of the routines +are passed in the \em alloc and \em free parameters and the custom routines' parameters are passed in the +\em alloc_info and \em free_info parameters. If the user wishes to use the system \em malloc and \em free functions, +the \em alloc and/or \em free parameters are set to \em NULL and the \em alloc_info and \em free_info parameters are ignored. + +

Recovering memory from VL buffers read in

+The complex memory buffers created for a VL datatype may be reclaimed with the #H5Dvlen_reclaim +function call, as follows: +\code +herr_t H5Dvlen_reclaim(hid_t type_id, hid_t space_id, hid_t plist_id, void *buf); +\endcode + +The \em type_id must be the datatype stored in the buffer, \em space_id describes the selection for the +memory buffer to free the VL datatypes within, \em plist_id is the dataset transfer property list +which was used for the I/O transfer to create the buffer, and \em buf is the pointer to the buffer +to free the VL memory within. The VL structures (#hvl_t) in the user's buffer are modified to zero +out the VL information after it has been freed. + +If nested VL datatypes were used to create the buffer, this routine frees them from the bottom up, +releasing all the memory without creating memory leaks. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +*/ diff --git a/doxygen/dox/LearnBasics3.dox b/doxygen/dox/LearnBasics3.dox new file mode 100644 index 0000000..2fe0f52 --- /dev/null +++ b/doxygen/dox/LearnBasics3.dox @@ -0,0 +1,1015 @@ +/** @page LBPropsList Property Lists Basics +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBPList What is a Property (or Property List)? +In HDF5, a property or property list is a characteristic or feature associated with an HDF5 object. +There are default properties which handle the most common needs. These default properties are +specified by passing in #H5P_DEFAULT for the Property List parameter of a function. Default properties +can be modified by use of the \ref H5P interface and function parameters. + +The \ref H5P API allows a user to take advantage of the more powerful features in HDF5. It typically +supports unusual cases when creating or accessing HDF5 objects. There is a programming model for +working with Property Lists in HDF5 (see below). + +For examples of modifying a property list, see these tutorial topics: +\li \see \ref LBDsetLayout +\li \see \ref LBExtDset +\li \see \ref LBComDset + +There are many Property Lists associated with creating and accessing objects in HDF5. See the +\ref H5P Interface documentation in the HDF5 \ref RM for a list of the different +properties associated with HDF5 interfaces. + +In summary: +\li Properties are features of HDF5 objects, that can be changed by use of the Property List API and function parameters. +\li Property lists provide a mechanism for adding functionality to HDF5 calls without increasing the number of arguments used for a given call. +\li The Property List API supports unusual cases when creating and accessing HDF5 objects. + +\section secLBPListProg Programming Model +Default properties are specified by simply passing in #H5P_DEFAULT (C) / H5P_DEFAULT_F (F90) for +the property list parameter in those functions for which properties can be changed. + +The programming model for changing a property list is as follows: +\li Create a copy or "instance" of the desired pre-defined property type, using the #H5Pcreate call. This +will return a property list identifier. Please see the \ref RM entry for #H5Pcreate, for a comprehensive +list of the property types. 
+\li With the property list identifier, modify the property, using the \ref H5P APIs. +\li Modify the object feature, by passing the property list identifier into the corresponding HDF5 object function. +\li Close the property list when done, using #H5Pclose. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBDsetLayout Dataset Storage Layout +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBDsetLayoutDesc Description of a Dataset + +\section secLBDsetLayout Dataset Storage Layout +The storage information, or storage layout, defines how the raw data values in the dataset are +physically stored on disk. There are three ways that a dataset can be stored: +\li contiguous +\li chunked +\li compact + +See the #H5Pset_layout/#H5Pget_layout APIs for details. + +\subsection subsecLBDsetLayoutCont Contiguous +If the storage layout is contiguous, then the raw data values will be stored physically adjacent +to each other in the HDF5 file (in one contiguous block). This is the default layout for a dataset. +In other words, if you do not explicitly change the storage layout for the dataset, then it will +be stored contiguously. + + + + +
+\image html tutr-locons.png +
+ +\subsection subsecLBDsetLayoutChunk Chunked +With a chunked storage layout the data is stored in equal-sized blocks or chunks of +a pre-defined size. The HDF5 library always writes and reads the entire chunk: + + + + +
+\image html tutr-lochk.png +
+ +Each chunk is stored as a separate contiguous block in the HDF5 file. There is a chunk index +which keeps track of the chunks associated with a dataset: + + + + +
+\image html tutr-lochks.png +
+ + +\subsubsection susubsecLBDsetLayoutChunkWhy Why Chunking ? +Chunking is required for enabling compression and other filters, as well as for creating extendible +or unlimited dimension datasets. + +It is also commonly used when subsetting very large datasets. Using the chunking layout can +greatly improve performance when subsetting large datasets, because only the chunks required +will need to be accessed. However, it is easy to use chunking without considering the consequences +of the chunk size, which can lead to strikingly poor performance. + +Note that a chunk always has the same rank as the dataset and the chunk's dimensions do not need +to be factors of the dataset dimensions. + +Writing or reading a chunked dataset is transparent to the application. You would use the same +set of operations that you would use for a contiguous dataset. For example: +\code + H5Dopen (...); + H5Sselect_hyperslab (...); + H5Dread (...); +\endcode + +\subsubsection susubsecLBDsetLayoutChunkProb Problems Using Chunking +Issues that can cause performance problems with chunking include: +\li Chunks are too small. +If a very small chunk size is specified for a dataset it can cause the dataset to be excessively +large and it can result in degraded performance when accessing the dataset. The smaller the chunk +size the more chunks that HDF5 has to keep track of, and the more time it will take to search for a chunk. +\li Chunks are too large. +An entire chunk has to be read and uncompressed before performing an operation. There can be a +performance penalty for reading a small subset, if the chunk size is substantially larger than +the subset. Also, a dataset may be larger than expected if there are chunks that only contain a +small amount of data. +\li A chunk does not fit in the Chunk Cache. +Every chunked dataset has a chunk cache associated with it that has a default size of 1 MB. 
The +purpose of the chunk cache is to improve performance by keeping chunks that are accessed frequently +in memory so that they do not have to be accessed from disk. If a chunk is too large to fit in the +chunk cache, it can significantly degrade performance. However, the size of the chunk cache can be +increased by calling #H5Pset_chunk_cache. + +It is a good idea to: +\li Avoid very small chunk sizes, and be aware of the 1 MB chunk cache size default. +\li Test the data with different chunk sizes to determine the optimal chunk size to use. +\li Consider the chunk size in terms of the most common access patterns that will be used once the dataset has been created. + +\subsection subsecLBDsetLayoutCom Compact +A compact dataset is one in which the raw data is stored in the object header of the dataset. +This layout is for very small datasets that can easily fit in the object header. + +The compact layout can improve storage and access performance for files that have many very tiny +datasets. With one I/O access both the header and data values can be read. The compact layout reduces +the size of a file, as the data is stored with the header which will always be allocated for a dataset. +However, the object header is 64 KB in size, so this layout can only be used for very small datasets. + +\section secLBDsetLayoutProg Programming Model to Modify the Storage Layout +To modify the storage layout, the following steps must be done: +\li Create a Dataset Creation Property list. (See #H5Pcreate) +\li Modify the property list. +To use chunked storage layout, call: #H5Pset_chunk +To use the compact storage layout, call: #H5Pset_layout +\li Create a dataset with the modified property list. (See #H5Dcreate) +\li Close the property list. (See #H5Pclose) +For example code, see the \ref HDF5Examples page. +Specifically look at the Examples by API. +There are examples for different languages. 
+ +The C example to create a chunked dataset is: +h5ex_d_chunk.c +The C example to create a compact dataset is: +h5ex_d_compact.c + +\section secLBDsetLayoutChange Changing the Layout after Dataset Creation +The dataset layout is a Dataset Creation Property List. This means that once the dataset has been +created the dataset layout cannot be changed. The h5repack utility can be used to write the file +to a new file with a new layout. + +\section secLBDsetLayoutSource Sources of Information +Chunking in HDF5 +(See the documentation on Advanced Topics in HDF5) +\see \ref sec_plist in the HDF5 \ref UG. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + + +@page LBExtDset Extendible Datasets +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBExtDsetCreate Creating an Extendible Dataset +An extendible dataset is one whose dimensions can grow. HDF5 allows you to define a dataset to have +certain initial dimensions, then to later increase the size of any of the initial dimensions. + +HDF5 requires you to use chunking to define extendible datasets. This makes it possible to extend +datasets efficiently without having to excessively reorganize storage. (To use chunking efficiently, +be sure to see the advanced topic, Chunking in HDF5.) + +The following operations are required in order to extend a dataset: +\li Declare the dataspace of the dataset to have unlimited dimensions for all dimensions that might eventually be extended. +\li Set dataset creation properties to enable chunking. +\li Create the dataset. +\li Extend the size of the dataset. + +\section secLBExtDsetProg Programming Example + +\subsection subsecLBExtDsetProgDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example shows how to create a 3 x 3 extendible dataset, write to that dataset, extend the dataset +to 10x3, and write to the dataset again. + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection subsecLBExtDsetProgRem Remarks +\li An unlimited dimension dataspace is specified with the #H5Screate_simple call, by passing in +#H5S_UNLIMITED as an element of the maxdims array. +\li The #H5Pcreate call creates a new property as an instance of a property list class. For creating +an extendible array dataset, pass in #H5P_DATASET_CREATE for the property list class. +\li The #H5Pset_chunk call modifies a Dataset Creation Property List instance to store a chunked +layout dataset and sets the size of the chunks used. +\li To extend an unlimited dimension dataset use the #H5Dset_extent call. Please be aware that +after this call, the dataset's dataspace must be refreshed with #H5Dget_space before more data can be accessed.
+\li The #H5Pget_chunk call retrieves the size of chunks for the raw data of a chunked layout dataset. +\li Once there is no longer a need for a Property List instance, it should be closed with the #H5Pclose call. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBComDset Compressed Datasets +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBComDsetCreate Creating a Compressed Dataset +HDF5 requires you to use chunking to create a compressed dataset. (To use chunking efficiently, +be sure to see the advanced topic, Chunking in HDF5.) + +The following operations are required in order to create a compressed dataset: +\li Create a dataset creation property list. +\li Modify the dataset creation property list instance to enable chunking and to enable compression. +\li Create the dataset. +\li Close the dataset creation property list and dataset. + +For more information on compression, see the FAQ question on Using Compression in HDF5. + +\section secLBComDsetProg Programming Example + +\subsection subsecLBComDsetProgDesc Description +See \ref LBExamples for the examples used in the \ref LearnBasics tutorial. + +The example creates a chunked and ZLIB compressed dataset. It also includes comments for what needs +to be done to create an SZIP compressed dataset. The example then reopens the dataset, prints the +filter information, and reads the dataset. + +For details on compiling an HDF5 application: +[ \ref LBCompiling ] + +\subsection subsecLBComDsetProgRem Remarks +\li The #H5Pset_chunk call modifies a Dataset Creation Property List instance to store a chunked layout +dataset and sets the size of the chunks used. +\li The #H5Pset_deflate call modifies the Dataset Creation Property List instance to use ZLIB or DEFLATE +compression. The #H5Pset_szip call modifies it to use SZIP compression. There are different compression +parameters required for each compression method. +\li SZIP compression can only be used with atomic datatypes that are integer, float, or char. It cannot be +applied to compound, array, variable-length, enumerations, or other user-defined datatypes. The call +to #H5Dcreate will fail if attempting to create an SZIP compressed dataset with a non-allowed datatype. +The conflict can only be detected when the property list is used. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBContents Discovering the Contents of an HDF5 File +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBContents Discovering what is in an HDF5 file +HDFView and h5dump are standalone tools which cannot be called within an application, and using +#H5Dopen and #H5Dread require that you know the name of the HDF5 dataset. How would an application +that has no prior knowledge of an HDF5 file be able to determine or discover the contents of it, +much like HDFView and h5dump? + +The answer is that there are ways to discover the contents of an HDF5 file, by using the +\ref H5G, \ref H5L and \ref H5O APIs: +\li The \ref H5G interface (covered earlier) consists of routines for working with groups. A group is +a structure that can be used to organize zero or more HDF5 objects, not unlike a Unix directory. +\li The \ref H5L interface consists of link routines. A link is a path between groups. The \ref H5L interface +allows objects to be accessed by use of these links. +\li The \ref H5O interface consists of routines for working with objects. Datasets, groups, and committed +datatypes are all objects in HDF5. + +Interface routines that simplify the process: +\li #H5Literate traverses the links in a specified group, in the order of the specified index, using a +user-defined callback routine. (A callback function is one that will be called when a certain condition +is met, at a certain point in the future.) +\li #H5Ovisit / #H5Lvisit recursively visit all objects/links accessible from a specified object/group. + + +\section secLBContentsProg Programming Example + +\subsection subsecLBContentsProgUsing Using #H5Literate, #H5Lvisit and #H5Ovisit +For example code, see the \ref HDF5Examples page. +Specifically look at the Examples by API. +There are examples for different languages, where examples of using #H5Literate and #H5Ovisit/#H5Lvisit are included. 
+ +The h5ex_g_traverse example traverses a file using H5Literate: +\li C: h5ex_g_traverse.c +\li F90: h5ex_g_traverse_F03.f90 + +The h5ex_g_visit example traverses a file using H5Ovisit and H5Lvisit: +\li C: h5ex_g_visit.c +\li F90: h5ex_g_visit_F03.f90 + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBQuiz Learning the basics QUIZ +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\ref LBFileOrg +
    +
  1. Name and describe the two primary objects that can be stored in an HDF5 file. +
  2. +
  3. What is an attribute? +
  4. +
  5. Give the path name for an object called harry that is a member of a group called dick, which, in turn, is a member of the root group. +
  6. +
+ +\ref LBAPI +
    +
  1. Describe the purpose of each of the following HDF5 APIs: +\code + H5A, H5D, H5E, H5F, H5G, H5T, H5Z +\endcode +
  2. +
+ +\ref LBFileCreate +
    +
  1. What two HDF5 routines must be called to create an HDF5 file? +
  2. +
  3. What include file must be included in any file that uses the HDF5 library? +
  4. +
  5. An HDF5 file is never completely empty because as soon as it is created, it automatically contains a certain primary object. What is that object? +
  6. +
+ +\ref LBDsetCreate +
    +
  1. Name and describe two major datatype categories. +
  2. +
  3. List the HDF5 atomic datatypes. Give an example of a predefined datatype. How would you create a string dataset? +
  4. +
  5. What does the dataspace describe? What are the major characteristics of the simple dataspace? +
  6. +
  7. What information needs to be passed to the #H5Dcreate function, i.e., what information is needed to describe a dataset at creation time? +
  8. +
+ + +\ref LBDsetRW +
    +
  1. What are six pieces of information which need to be specified for reading and writing a dataset? +
  2. +
  3. Why are both the memory dataspace and file dataspace needed for read/write operations, while only the memory datatype is required? +
  4. +
  5. In Figure 6.1, what does this line mean? +\code +DATASPACE { SIMPLE (4 , 6 ) / ( 4 , 6 ) } +\endcode +
  6. +
+ + +\ref LBAttrCreate +
    +
  1. What is an attribute? +
  2. +
  3. Can partial I/O operations be performed on attributes? +
  4. +
+ + +\ref LBGrpCreate +
    +
  1. What are the two primary objects that can be included in a group? +
  2. +
+ + +\ref LBGrpCreateNames +
    +
  1. Group names can be specified in two ways. What are these two types of group names? +
  2. +
  3. You have a dataset named moo in the group boo, which is in the group foo, which, in turn, +is in the root group. How would you specify an absolute name to access this dataset? +
  4. +
+ + +\ref LBGrpDset +
    +
  1. Describe a way to access the dataset moo described in the previous section +(question 2) using a relative name. Describe a way to access the same dataset using an absolute name. +
  2. +
+ +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBQuizAnswers Learning the basics QUIZ with Answers +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\ref LBFileOrg +
    +
  1. Name and describe the two primary objects that can be stored in an HDF5 file. + + + + + +
    Answer +Group: A grouping structure containing zero or more HDF5 objects, together with supporting metadata.
    +Dataset: A multidimensional array of data elements, together with supporting metadata. +
    +
  2. +
  3. What is an attribute? + + + + + +
    Answer +An HDF5 attribute is a user-defined HDF5 structure that provides extra information about an HDF5 object. +
    +
  4. +
  5. Give the path name for an object called harry that is a member of a group called dick, which, in turn, is a member of the root group. + + + + + +
    Answer +/dick/harry +
    +
  6. +
+ +\ref LBAPI +
    +
  1. Describe the purpose of each of the following HDF5 APIs: +\code + H5A, H5D, H5E, H5F, H5G, H5T, H5Z +\endcode + + + + + +
    Answer +H5A: Attribute access and manipulation routines +
    +H5D: Dataset access and manipulation routines +
    +H5E: Error handling routines H5F: File access routines +
    +H5G: Routines for creating and operating on groups +
    +H5T: Routines for creating and manipulating the datatypes of dataset elements +
    +H5Z: Data compression routines +
    +
  2. +
+ +\ref LBFileCreate +
    +
  1. What two HDF5 routines must be called to create an HDF5 file? + + + + + +
    Answer +#H5Fcreate and #H5Fclose. +
    +
  2. +
  3. What include file must be included in any file that uses the HDF5 library? + + + + + +
    Answer +hdf5.h must be included because it contains definitions and declarations used by the library. +
    +
  4. +
  5. An HDF5 file is never completely empty because as soon as it is created, it automatically contains a certain primary object. What is that object? + + + + + +
    Answer +The root group. +
    +
  6. +
+ +\ref LBDsetCreate +
    +
  1. Name and describe two major datatype categories. + + + + + +
    Answer +Atomic datatype: An atomic datatype cannot be decomposed into smaller units at the API level. +
    +Compound datatype: A compound datatype is a collection of atomic and compound datatypes, or small arrays of such types. +
    +
  2. +
  3. List the HDF5 atomic datatypes. Give an example of a predefined datatype. How would you create a string dataset? + + + + + +
    Answer +There are six HDF5 atomic datatypes: integer, floating point, date and time, character string, bit field, and opaque. +
    +Examples of predefined datatypes include the following:
    +\li #H5T_IEEE_F32LE - 4-byte little-endian, IEEE floating point +\li #H5T_NATIVE_INT - native integer + +You would create a string dataset with the #H5T_C_S1 datatype, and set the size of the string with the #H5Tset_size call. +
    +
  4. +
  5. What does the dataspace describe? What are the major characteristics of the simple dataspace? + + + + + +
    Answer +The dataspace describes the dimensionality of the dataset. A simple dataspace is characterized by its rank and dimension sizes. +
    +
  6. +
  7. What information needs to be passed to the #H5Dcreate function, i.e., what information is needed to describe a dataset at creation time? + + + + + +
    Answer +The dataset location, name, dataspace, datatype, and dataset creation property list. +
    +
  8. +
+ + +\ref LBDsetRW +
    +
  1. What are six pieces of information which need to be specified for reading and writing a dataset? + + + + + +
    Answer +The dataset identifier, the dataset's datatype and dataspace in memory, the dataspace in the file, +the dataset transfer property list, and a data buffer. +
    +
  2. +
  3. Why are both the memory dataspace and file dataspace needed for read/write operations, while only the memory datatype is required? + + + + + +
    Answer +A dataset's file datatype is not required for a read/write operation because the file datatype is specified +when the dataset is created and cannot be changed. Both file and memory dataspaces are required for dataset +subsetting and for performing partial I/O operations. +
    +
  4. +
  5. In Figure 6.1, what does this line mean? +\code +DATASPACE { SIMPLE (4 , 6 ) / ( 4 , 6 ) } +\endcode + + + + + +
    Answer +It means that the dataset dset has a simple dataspace with the current dimensions (4,6) and the maximum size of the dimensions (4,6). +
    +
  6. +
+ + +\ref LBAttrCreate +
    +
  1. What is an attribute? + + + + + +
    Answer +An attribute is a dataset attached to an object. It describes the nature and/or the intended usage of the object. +
    +
  2. +
  3. Can partial I/O operations be performed on attributes? + + + + + +
    Answer +No. +
    +
  4. +
+ + +\ref LBGrpCreate +
    +
  1. What are the two primary objects that can be included in a group? + + + + + +
    Answer +A group and a dataset. +
    +
  2. +
+ + +\ref LBGrpCreateNames +
    +
  1. Group names can be specified in two ways. What are these two types of group names? + + + + + +
    Answer +Relative and absolute. +
    +
  2. +
  3. You have a dataset named moo in the group boo, which is in the group foo, which, in turn, +is in the root group. How would you specify an absolute name to access this dataset? + + + + + +
    Answer +/foo/boo/moo +
    +
  4. +
+ + +\ref LBGrpDset +
    +
  1. Describe a way to access the dataset moo described in the previous section +(question 2) using a relative name. Describe a way to access the same dataset using an absolute name. + + + + + +
    Answer +Access the group /foo and get the group ID. Access the group boo using the group ID obtained in Step 1. +Access the dataset moo using the group ID obtained in Step 2. +\code +gid = H5Gopen (file_id, "/foo", 0); /* absolute path */ +gid1 = H5Gopen (gid, "boo", 0); /* relative path */ +did = H5Dopen (gid1, "moo"); /* relative path */ +\endcode +Access the group /foo and get the group ID. Access the dataset boo/moo with the group ID just obtained. +\code +gid = H5Gopen (file_id, "/foo", 0); /* absolute path */ +did = H5Dopen (gid, "boo/moo"); /* relative path */ +\endcode +Access the dataset with an absolute path. +\code +did = H5Dopen (file_id, "/foo/boo/moo"); /* absolute path */ +\endcode +
    +
  2. +
+ +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBCompiling Compiling HDF5 Applications +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +\section secLBCompiling Tools and Instructions on Compiling +Compiling applications to use the HDF5 Library can be as simple as executing: +\code +h5cc -o myprog myprog.c +\endcode + +As an application's code base evolves, there are better solutions using autotools and makefiles or +CMake and CMakeLists.txt files. Many tutorials and references can be found with a simple search. + +This tutorial section will discuss the use of compile scripts on Linux. +See the \ref secLBCompilingVS section for compiling with Visual Studio. + +\section secLBCompilingLinux Compile Scripts +When the library is built, the following compile scripts are included: +\li h5cc: compile script for HDF5 C programs +\li h5fc: compile script for HDF5 F90 programs +\li h5c++: compile script for HDF5 C++ programs + +These scripts are easily used to compile single file applications, such as those included in the tutorial. + + + + + +
Warning +The h5cc/h5fc/h5c++ compile scripts are included when building with configure. Versions of +these compile scripts have also been added to CMake for Linux ONLY. The CMake versions rely on pkgconfig files. +
+ +

Examples of Using the Unix Compile Scripts:

+Following are examples of compiling and running an application with the Unix compile scripts: +\code + h5fc myprog.f90 + ./a.out + + h5cc -o myprog myprog.c + ./myprog +\endcode + +To see how the libraries linked in with a compile script were configured and built, use the +-showconfig option. For example, if using h5cc type: +\code + h5cc -showconfig +\endcode + +

Detailed Description of Unix Compile Scripts:

+The h5cc, h5c++, and h5fc compile scripts come with the HDF5 binary distributions (include files, +libraries, and utilities) for the platforms we support. The h5c++ and h5fc utilities are ONLY present +if the library was built with C++ and Fortran. + +\section secLBCompilingVS Using Visual Studio + + 1. If you are building on 64-bit Windows, find the "Platform" dropdown + and select "x64". Also select the correct Configuration (Debug, Release, RelWithDebInfo, etc) + + 2. Set up path for external headers + + The HDF5 install path settings will need to be in the project property sheets per project. + Go to "Project" and select "Properties", find "Configuration Properties", + and then "C/C++". + + 2.1 Add the header path to the "Additional Include Directories" setting. Under "C/C++" + find "General" and select "Additional Include Directories". Select "Edit" from the dropdown + and add the HDF5 install/include path to the list. + (Ex: "C:\Program Files\HDF_Group\HDF5\1.10.9\include") + + 2.2 Building applications with the dynamic/shared hdf5 libraries requires + that the "H5_BUILT_AS_DYNAMIC_LIB" compile definition be used. Under "C/C++" + find "Preprocessor" and select "Preprocessor Definitions". Select "Edit" from the dropdown + and add "H5_BUILT_AS_DYNAMIC_LIB" to the list. + + 3. Set up path for external libraries + + The HDF5 install path/lib settings will need to be in the project property sheets per project. + Go to "Project" and select "Properties", find "Configuration Properties", + and then "Linker". + + 3.1 Add the libraries to the "Additional Dependencies" setting. Under "Linker" + find "Input" and select "Additional Dependencies". Select "Edit" from the dropdown + and add the required HDF5 install/lib path to the list. + (Ex: "C:\Program Files\HDF_Group\HDF5\1.10.9\lib\hdf5.lib") + + 3.2 For static builds, the external libraries should be added. 
+ For example, to compile a C++ application, enter: + libhdf5_cpp.lib libhdf5.lib libz.lib libszaec.lib libaec.lib + +\section secLBCompilingLibs HDF5 Libraries +Following are the libraries included with HDF5. Whether you are using the Unix compile scripts or +Makefiles, or are compiling on Windows, these libraries are or may need to be specified. The order +they are specified is important on Linux: + + + + + + + + + + + + + + +
HDF5 Static Libraries
LibraryLinux NameMac NameWindows Name
+ +\code +HDF5 High Level C++ APIs +HDF5 C++ Library +HDF5 High Level Fortran APIs +HDF5 Fortran Library +HDF5 High Level C APIs +HDF5 C Library +\endcode + +\code +libhdf5_hl_cpp.a +libhdf5_cpp.a +libhdf5hl_fortran.a +libhdf5_fortran.a +libhdf5_hl.a +libhdf5.a +\endcode + +\code +libhdf5_hl_cpp.a +libhdf5_cpp.a +libhdf5hl_fortran.a +libhdf5_fortran.a +libhdf5_hl.a +libhdf5.a +\endcode + +\code +libhdf5_hl_cpp.lib +libhdf5_cpp.lib +libhdf5hl_fortran.lib +libhdf5_fortran.lib +libhdf5_hl.lib +libhdf5.lib +\endcode +
+ + + + + + + + + + + + + + +
HDF5 Shared Libraries
LibraryLinux NameMac NameWindows Name
+\code +HDF5 High Level C++ APIs +HDF5 C++ Library +HDF5 High Level Fortran APIs +HDF5 Fortran Library +HDF5 High Level C APIs +HDF5 C Library +\endcode + +\code +libhdf5_hl_cpp.so +libhdf5_cpp.so +libhdf5hl_fortran.so +libhdf5_fortran.so +libhdf5_hl.so +libhdf5.so +\endcode + +\code +libhdf5_hl_cpp.dylib +libhdf5_cpp.dylib +libhdf5hl_fortran.dylib +libhdf5_fortran.dylib +libhdf5_hl.dylib +libhdf5.dylib +\endcode + +\code +hdf5_hl_cpp.lib +hdf5_cpp.lib +hdf5hl_fortran.lib +hdf5_fortran.lib +hdf5_hl.lib +hdf5.lib +\endcode +
+ + + + + + + + + + + + + + + +
External Libraries
LibraryLinux NameMac NameWindows Name
+\code +SZIP Compression Library +SZIP Compression Library +ZLIB or DEFLATE Compression Library +\endcode + +\code +libszaec.a +libaec.a +libz.a +\endcode + +\code +libszaec.a +libaec.a +libz.a +\endcode + +\code +libszaec.lib +libaec.lib +libz.lib +\endcode +
+ +The pre-compiled binaries, in particular, are built (if at all possible) with these libraries as well as with +SZIP and ZLIB. If using shared libraries you may need to add the path to the library to LD_LIBRARY_PATH on Linux +or on WINDOWS you may need to add the path to the bin folder to PATH. + +\section secLBCompilingCMake Compiling an Application with CMake + +\subsection subsecLBCompilingCMakeScripts CMake Scripts for Building Applications +Simple scripts are provided for building applications with different languages and options. +See CMake Scripts for Building Applications. + +For a more complete script (and to help resolve issues) see the script provided with the HDF5 Examples project. + +\subsection subsecLBCompilingCMakeExamples HDF5 Examples +The installed HDF5 can be verified by compiling the HDF5 Examples project, included with the CMake built HDF5 binaries +in the share folder or you can go to the HDF5 Examples github repository. + +Go into the share directory and follow the instructions in USING_CMake_examples.txt to build the examples. + +In general, users must first set the HDF5_ROOT environment variable to the installed location of the CMake +configuration files for HDF5. For example, on Windows the following path might be set: + +\code + HDF5_ROOT=C:/Program Files/HDF_Group/HDF5/1.N.N +\endcode + +\subsection subsecLBCompilingCMakeTroubless Troubleshooting CMake +

How do you use find_package with HDF5?

+To use find_package you will first need to make sure that HDF5_ROOT is set correctly. For setting this +environment variable see the Preconditions in the USING_HDF5_CMake.txt file in the share directory. + +See the CMakeLists.txt file provided with these examples for how to use find_package with HDF5. + +Please note that the find_package invocation changed to require "shared" or "static": +\code + FIND_PACKAGE(HDF5 COMPONENTS C HL NO_MODULE REQUIRED shared) + FIND_PACKAGE(HDF5 COMPONENTS C HL NO_MODULE REQUIRED static) +\endcode + +Previously, the find_package invocation was: +\code + FIND_PACKAGE(HDF5 COMPONENTS C HL NO_MODULE REQUIRED) +\endcode + +

My platform/compiler is not included. Can I still use the configuration files?

+Yes, you can but you will have to edit the HDF5_Examples.cmake file and update the variable: +\code + CTEST_CMAKE_GENERATOR +\endcode + +The generators for your platform can be seen by typing: +\code + cmake --help +\endcode + +

What do I do if the build fails?

+I received an error during the build and the application binary is not in the +build directory as I expected. How do I determine what the problem is? + +If the error is not clear, then the first thing you may want to do is replace the -V (Dash Uppercase Vee) +option for ctest in the build script to -VV (Dash Uppercase Vee Uppercase Vee). Then remove the build +directory and re-run the build script. The output should be more verbose. + +If the error is still not clear, then check the log files. You will find those in the build directory. +For example, on Unix the log files will be in: +\code + build/Testing/Temporary/ +\endcode +There are log files for the configure, test, and build. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +@page LBTraining Training Videos +Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics +
+ +Training Videos + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics + +*/ diff --git a/doxygen/dox/LearnHDFView.dox b/doxygen/dox/LearnHDFView.dox new file mode 100644 index 0000000..b1f632c --- /dev/null +++ b/doxygen/dox/LearnHDFView.dox @@ -0,0 +1,472 @@ +/** @page LearnHDFView Learning HDF5 with HDFView + +Navigate back: \ref index "Main" / \ref GettingStarted +
+ +This tutorial enables you to get a feel for HDF5 by using the HDFView browser. It does NOT require +any programming experience. + +\section sec_learn_hv_install HDFView Installation +\li Download and install HDFView. It can be downloaded from the Download HDFView page. +\li Obtain the storm1.txt text file, used in the tutorial. + +\section sec_learn_hv_begin Begin Tutorial +Once you have HDFView installed, bring it up and you are ready to begin the tutorial. + + + + + + +
+Unable to complete tutorial because fields are greyed out? +
+This tutorial requires that the default HDFView File Access Mode be Read / Write. If fields are greyed out so that you cannot select them, then the File Access Mode is Read Only. + +To change the File Access Mode follow these steps: +
    +
  • Bring up HDFView
  • +
  • Left-mouse click on the Tools pull-down menu and select User Options.
  • +
  • A Preferences window pops up with the General Settings tab selected. +About half-way down you will see Default File Access Mode. +Select Read / Write.
  • +
  • Click on Apply and Close at the bottom of the window.
  • +
  • Close down HDFView.
  • +
  • Bring HDFView back up and try the tutorial again.
  • +PLEASE BE AWARE that selecting a File Access Mode of Read / Write can result in changes to the timestamp of HDF files that are viewed with HDFView. In general, a File Access Mode +of Read Only should be used to ensure that this does not occur. +
+
+ +\subsection subsec_learn_hv_begin_topics Topics Covered +Following are the topics covered in the tutorial. The first topic creates the file that is used in +the subsequent topics. +
    +
  • @ref subsec_learn_hv_topics_file
  • +
  • @ref subsec_learn_hv_topics_image
  • +
  • @ref subsec_learn_hv_topics_attr
  • +
  • @ref subsec_learn_hv_topics_compress
  • +
  • @ref subsec_learn_hv_topics_subset
  • +
  • @ref subsec_learn_hv_topics_table
  • +
+ +\section sec_learn_hv_topics Topics + +\subsection subsec_learn_hv_topics_file Creating a New HDF5 File with a Contiguous Dataset +The steps below describe how to create a file (storm.h5), group (/Data), and a contiguous dataset +(/Data/Storm) using HDFView. A group is an HDF5 object that allows objects to be collected together. +A dataset is an array of data values. A contiguous dataset is one that is stored as a single block +in the HDF5 file. +
    +
  • Select the File pull-down menu at the top left, and then select New -> HDF5.
  • +
  • Specify a location and type in storm.h5 for the name of your file, and click on the Save button. +You will see the storm.h5 file in the TableView: + + + + +
    +\image html storm.png +
    +
  • +
  • Right click on storm.h5, and select New -> Group.
  • +
  • Enter Data for the name of the group and then click the Ok button. You will see the group Data in the TableView. + + + + +
    +\image html DataGroup.png +
    +
  • +
  • Right click on the group Data and select New -> Dataset.
  • +
  • A window pops up on the right. Fill in the information as follows, and then click Ok (leave the +Datatype information as is): + + + + + + + + + + + + + +
    Dataset Name +Storm +
    Under Dataspace, Current size +57x57 +
    Layout +Contiguous (default) +
    +
  • +
  • Click to expand the Data group in the tree view to see the Storm dataset: + + + + +
    +\image html StormDataset.png +
    +
  • +
  • Double left click on the Storm dataset in the tree view. A window with an empty spreadsheet pops open.
  • +
  • Copy the data from the storm1.txt file into the dataset. + +If you downloaded storm1.txt, +then click on the Import/Export Data menu and select Import Data from -> Text File. +Specify a location, select storm1.txt +and click on the Open button. Answer Yes in the dialog box that +pops up (which asks if you wish to paste the selected data). + +Alternately, you can copy/paste directly. Select and copy the data in a separate window. Position your +cursor at (0,0) in your table, and select Paste from the Table menu. + +The values will be entered into the spreadsheet. + + + + +
    +\image html datasetwdata.png +
    +
  • +
  • Table -> Close the dataset, and save the data.
  • +
+ +\subsection subsec_learn_hv_topics_image Displaying a Dataset as an Image +Any dataset can be viewed as an image in HDFView. Below are the steps that demonstrate this. +
    +
  • Right click on Storm in the tree view, and select Open As.
  • +
  • Select the Image button under Display As (near the top) in the Dataset Selection window that pops +up. Then click OK at the bottom of the window to display the image. + + + + +
    +\image html showasimage.png +
    +
  • +
  • The rainbow icon brings you to the Image Palette window. Click on that to play with the palette +(GrayWave probably is the best choice). Close.
  • +
+ +\subsection subsec_learn_hv_topics_attr Creating Attributes +Additional information to describe an object can be stored in attributes. An attribute can be +added to a group or dataset with HDFView. + +The following illustrates how to add an attribute to the group /Data: +
    +
  • Click on the /Data folder in the tree view. You will see two tabs, Object Attribute Info and +General Object Info, in the pane on the right site of the HDFView window. + + + + +
    +\image html noattrs.png +
    +
  • +
  • With the left mouse button, select the Add Attribute button.
  • +
  • Select the Add Attribute button to add an attribute with these values:
  • + + + + + + + + + + + + +
    Name +BatchID +
    Type +INTEGER +
    Size (bits) +32 +
    +
  • Select the Ok button. The attribute will show up under the Object Attribute Info tab.
  • +
  • Double-click the BatchID attribute line to open the data table for BatchID.
  • +
  • Click in the first cell and enter 3343 followed by the enter key.
  • +
  • Table -> Close, answer Yes in the dialog box that +pops up (which asks if you wish to save the changed data).
  • +
+Adding an attribute to a dataset is very similar to adding an attribute to a group. For example, +the following adds an attribute to the /Storm dataset: +
    +
  • Left mouse click on the /Storm dataset in the tree view. You will see the Object Attribute +Info and General Object Info tabs on the right
  • +
  • In the Object Attribute Info pane select the Add Attribute button and enter an attribute with +these values. (Be sure to add a String Length or the string will be truncated to one character!):
  • + + + + + + + + + + + + +
    Name +Units +
    Type +STRING +
    String Length +3 +
    +
  • Select the Ok button. The attribute will show up under the Object Attribute Info tab.
  • +
  • Double-click the Units attribute line to open the data table for Units.
  • +
  • Click in the first cell and enter m/s followed by the enter key.
  • +
  • Table -> Close, answer Yes in the dialog box that +pops up (which asks if you wish to save the changed data). + + + + +
    +\image html scarletletter.png +
    +
  • +
+ +\subsection subsec_learn_hv_topics_compress Creating a Compressed and Chunked Dataset +A chunked and compressed dataset can be created using HDFView. A compressed dataset is a dataset +whose data has been compressed so that it takes up less space. In order to compress an HDF5 dataset, the +dataset must be stored with a chunked dataset layout (as multiple chunks that are stored separately +in the file). + +Please note that the chunk sizes used in this topic are for demonstration purposes only. For +information on chunking and specifying an appropriate chunk size, see the +Chunking in HDF5 documentation. + +Also see the HDF5 Tutorial topic on \ref secLBComDsetCreate. +
    +
  • Right click on storm.h5. Select New -> Group.
  • +
  • Enter Image for the name of the group, and click the OK button to create the group. + + + + +
    +\image html newgroupimage.png +
    +
  • +
  • Right click on the Image group, and select New -> Dataset.
  • +
  • Enter the following information for the dataset. Leave the Datatype as is (INTEGER): + + + + + + + + + + + + + + + + + + + + + + + + +
    Dataset name +Another Storm +
    Under Dataspace, Current size +57x57 +
    Storage Layout +Chunked +
    Chunk Size +20x20 +
    Compression +gzip +
    Compression Level +9 +
    +You will see the Another Storm dataset in the Image group: + + + + +
    +\image html hdfview-anthrstrm.png +
    +
  • +
  • Double left-mouse click on the Another Storm dataset to display the spreadsheet: + + + + +
    +\image html hdfview-anthrstrm-sprdsht.png +
    +
  • +
  • Copy the data from the storm1.txt file into the dataset. (See the previous topic for copying +storm1.txt into a dataset.)
  • +
  • Table -> Close, and save the data.
  • +
  • Right click on Another Storm, and select Open As.
  • +
  • Select the Image button in the Dataset Selection window that pops up. Click the Ok button at the +bottom of the window to view the dataset as an image. + + + + +
    +\image html hdfview-anthrstrm-img.png +
    +
  • +
+ +\subsection subsec_learn_hv_topics_subset Creating an Image and a Subset +A previous topic demonstrated how to view any dataset as an image in HDFView. With HDFView you can also +create an image to begin with, as is shown below. +
    +
  • Right click on the Data group and select New -> Image.
  • +
  • A window pops up on the right. Enter the following and then click Ok:
  • + + + + + + + + + + + + +
    Image name +Storm Image +
    Height +57 +
    Width +57 +
    + +
  • Close the dataset.
  • +
  • Expand the Data group to see its contents. You will see the Storm Image dataset. + + + + +
    +\image html hdfview-imgicon.png +
    +
  • +
  • +Add data to the Storm Image dataset as was shown previously: +
      +
    • Right click on Storm Image, and select Open As to open the Dataset Selection window.
    • +
    • Click on the Spreadsheet button at the top left of the Dataset Selection window to view the image +as a spreadsheet.
    • +
    • Copy the data from the storm1.txt file into the dataset.
    • +
    • Close the dataset and save the data.
    • +
    +
  • +
  • Left double click on Storm Image to see the image. Close the dataset.
  • +
  • Right click on Storm Image and select Open As to bring up the Data Selection window.
  • +
  • Select a subset by clicking the left mouse on the image in the window and dragging the mouse. +Notice that the Height and Width values change. Select to display it as an image. Click Ok. + + + + +
    +\image html hdfview-imgsubset.png +
    +
  • +
  • Position the cursor in the middle of the image. Press Shift+Left Mouse button and hold, and then +drag the mouse to select another subset.
  • +
  • Select Image->Write Selection to Image. Enter Subset for the new image name. Click Ok. The Subset +image will appear in the tree view on the left.
  • +
  • Left double click on the image Subset to bring it up on the right. + + + + +
    +\image html hdfview-newimgsubset.png +
    +
  • +
  • Close the Subset image.
  • +
+ +\subsection subsec_learn_hv_topics_table Creating a Table (Compound Dataset) +A dataset with a compound datatype contains data elements that consist of multiple fields. If the +dataspace for the compound dataset is one-dimensional, then the dataset can be viewed as a table in +HDFView, as is shown below. +
    +
  • Right button click on the group Data. Select New -> Compound DS.
  • +
  • A window pops up. Only fill in the following fields: + + + + + + + + + + + + + + + + + +
    Dataset name +Table +
    Dataspace (Current size only) +4 +
    Compound Datatype Properties: +
    Number of Members +
    3 +
    Compound Datatype Properties: +
    Name / Datatype / Size +
    Description / string / 4 +
    Temperature / float / 1 +
    Pressure / double / 1 +
    + + + + + +
    +\image html hdfview-newcmpd.png +
    +
  • +
  • Click Ok at the bottom.
  • +
  • Open the Data group (if it is not open) and double left click on the Table object. + + + + +
    +\image html hdfview-table.png +
    +
  • +
  • Close the dataset.
  • +
+ +
+Navigate back: \ref index "Main" / \ref GettingStarted + +*/ diff --git a/doxygen/dox/ReferenceManual.dox b/doxygen/dox/ReferenceManual.dox index df0c747..7900925 100644 --- a/doxygen/dox/ReferenceManual.dox +++ b/doxygen/dox/ReferenceManual.dox @@ -1,53 +1,32 @@ /** \page RM HDF5 Reference Manual -The functions provided by the HDF5 C-API are grouped into the following +The functions provided by the HDF5 API are grouped into the following \Emph{modules}: + +
Modules
- - - +\include{doc} core_menu.md + + + + + + + + diff --git a/doxygen/dox/Specifications.dox b/doxygen/dox/Specifications.dox index 5a36d61..e352f40 100644 --- a/doxygen/dox/Specifications.dox +++ b/doxygen/dox/Specifications.dox @@ -2,20 +2,20 @@ \section DDL -\li \ref DDLBNF110 "DDL in BNF through HDF5 1.10" -\li \ref DDLBNF112 "DDL in BNF for HDF5 1.12 and above" +\li \ref DDLBNF110 +\li \ref DDLBNF112 \section File Format -\li \ref FMT1 "HDF5 File Format Specification Version 1.0" -\li \ref FMT11 "HDF5 File Format Specification Version 1.1" -\li \ref FMT2 "HDF5 File Format Specification Version 2.0" -\li \ref FMT3 "HDF5 File Format Specification Version 3.0" +\li \ref FMT1 +\li \ref FMT11 +\li \ref FMT2 +\li \ref FMT3 \section Other -\li \ref IMG "HDF5 Image and Palette Specification Version 1.2" -\li \ref TBL "HDF5 Table Specification Version 1.0" +\li \ref IMG +\li \ref TBL \li HDF5 Dimension Scale Specification diff --git a/doxygen/dox/TechnicalNotes.dox b/doxygen/dox/TechnicalNotes.dox index 9bd2802..bca81e4 100644 --- a/doxygen/dox/TechnicalNotes.dox +++ b/doxygen/dox/TechnicalNotes.dox @@ -1,13 +1,13 @@ /** \page TN Technical Notes -\li \link api-compat-macros API Compatibility Macros \endlink -\li \ref APPDBG "Debugging HDF5 Applications" -\li \ref FMTDISC "File Format Walkthrough" -\li \ref FILTER "Filters" -\li \ref IOFLOW "HDF5 Raw I/O Flow Notes" -\li \ref TNMDC "Metadata Caching in HDF5" -\li \ref MT "Thread Safe library" -\li \ref VFL "Virtual File Layer" +\li \ref api-compat-macros +\li \ref APPDBG +\li \ref FMTDISC +\li \ref FILTER +\li \ref IOFLOW +\li \ref TNMDC +\li \ref MT +\li \ref VFL */ diff --git a/doxygen/dox/UsersGuide.dox b/doxygen/dox/UsersGuide.dox new file mode 100644 index 0000000..dbb6053 --- /dev/null +++ b/doxygen/dox/UsersGuide.dox @@ -0,0 +1,403 @@ +/** \page UG HDF5 User Guide + +
+HDF5 Release 1.14 + +\image html HDFG-logo.png "The HDF Group" + +
+ +\ref sec_data_model +\li \ref subsec_data_model_intro +\li \ref subsec_data_model_abstract +
    +
  • \ref subsubsec_data_model_abstract_file +
  • \ref subsubsec_data_model_abstract_group +
  • \ref subsubsec_data_model_abstract_dataset +
  • \ref subsubsec_data_model_abstract_space +
  • \ref subsubsec_data_model_abstract_type +
  • \ref subsubsec_data_model_abstract_attr +
  • \ref subsubsec_data_model_abstract_plist +
  • \ref subsubsec_data_model_abstract_link +
+\li \ref subsec_data_model_storage +
    +
  • \ref subsubsec_data_model_storage_spec +
  • \ref subsubsec_data_model_storage_imple +
+\li \ref subsec_data_model_structure +
    +
  • \ref subsubsec_data_model_structure_file +
  • \ref subsubsec_data_model_structure_path +
  • \ref subsubsec_data_model_structure_example +
+ +\ref sec_program +\li \ref subsec_program_intro +\li \ref subsec_program_model +
    +
  • \ref subsubsec_program_model_create +
  • \ref subsubsec_program_model_dset +
  • \ref subsubsec_program_model_close +
  • \ref subsubsec_program_model_data +
  • \ref subsubsec_program_model_partial +
  • \ref subsubsec_program_model_info +
  • \ref subsubsec_program_model_compound +
  • \ref subsubsec_program_model_extend +
  • \ref subsubsec_program_model_group +
  • \ref subsubsec_program_model_attr +
+\li \ref subsec_program_transfer_pipeline + +\ref sec_file +\li \ref subsec_file_intro +\li \ref subsec_file_access_modes +\li \ref subsec_file_creation_access +\li \ref subsec_file_drivers +\li \ref subsec_file_program_model +
    +
  • \ref subsubsec_file_program_model_create +
  • \ref subsubsec_file_program_model_open +
  • \ref subsubsec_file_program_model_close +
+\li \ref subsec_file_h5dump +\li \ref subsec_file_summary +\li \ref subsec_file_create +\li \ref subsec_file_closes +\li \ref subsec_file_property_lists +
    +
  • \ref subsubsec_file_property_lists_create +
  • \ref subsubsec_file_property_lists_props +
  • \ref subsubsec_file_property_lists_access +
+\li \ref subsec_file_alternate_drivers +
    +
  • \ref subsubsec_file_alternate_drivers_id +
  • \ref subsubsec_file_alternate_drivers_sec2 +
  • \ref subsubsec_file_alternate_drivers_direct +
  • \ref subsubsec_file_alternate_drivers_log +
  • \ref subsubsec_file_alternate_drivers_win +
  • \ref subsubsec_file_alternate_drivers_stdio +
  • \ref subsubsec_file_alternate_drivers_mem +
  • \ref subsubsec_file_alternate_drivers_family +
  • \ref subsubsec_file_alternate_drivers_multi +
  • \ref subsubsec_file_alternate_drivers_split +
  • \ref subsubsec_file_alternate_drivers_par +
+\li \ref subsec_file_examples +
    +
  • \ref subsubsec_file_examples_trunc +
  • \ref subsubsec_file_examples_props +
  • \ref subsubsec_file_examples_access +
+\li \ref subsec_file_multiple + +\ref sec_group +\li \ref subsec_group_intro +\li \ref subsec_group_descr +
    +
  • \ref subsubsec_group_descr_object +
  • \ref subsubsec_group_descr_model +
  • \ref subsubsec_group_descr_path +
  • \ref subsubsec_group_descr_impl +
+\li \ref subsec_group_h5dump +\li \ref subsec_group_function +\li \ref subsec_group_program +
    +
  • \ref subsubsec_group_program_create +
  • \ref subsubsec_group_program_open +
  • \ref subsubsec_group_program_dataset +
  • \ref subsubsec_group_program_close +
  • \ref subsubsec_group_program_links +
  • \ref subsubsec_group_program_info +
  • \ref subsubsec_group_program_objs +
  • \ref subsubsec_group_program_all +
+\li \ref subsec_group_examples + +\ref sec_dataset +\li \ref subsec_dataset_intro +\li \ref subsec_dataset_function +\li \ref subsec_dataset_program +
    +
  • \ref subsubsec_dataset_program_general +
  • \ref subsubsec_dataset_program_create +
  • \ref subsubsec_dataset_program_transfer +
  • \ref subsubsec_dataset_program_read +
+\li \ref subsec_dataset_transfer Data Transfer +
    +
  • \ref subsubsec_dataset_transfer_pipe +
  • \ref subsubsec_dataset_transfer_filter +
  • \ref subsubsec_dataset_transfer_drive +
  • \ref subsubsec_dataset_transfer_props +
  • \ref subsubsec_dataset_transfer_store +
  • \ref subsubsec_dataset_transfer_partial +
+\li \ref subsec_dataset_allocation +
    +
  • \ref subsubsec_dataset_allocation_store +
  • \ref subsubsec_dataset_allocation_delete +
  • \ref subsubsec_dataset_allocation_release +
  • \ref subsubsec_dataset_allocation_ext +
+\li \ref subsec_dataset_filters +
    +
  • \ref subsubsec_dataset_filters_nbit +
  • \ref subsubsec_dataset_filters_scale +
  • \ref subsubsec_dataset_filters_szip +
+ +\ref sec_datatype +\li \ref subsec_datatype_intro +\li \ref subsec_datatype_model +
    +
  • \ref subsubsec_datatype_model_class +
  • \ref subsubsec_datatype_model_predefine +
+\li \ref subsec_datatype_usage +
    +
  • \ref subsubsec_datatype_usage_object +
  • \ref subsubsec_datatype_usage_create +
  • \ref subsubsec_datatype_usage_transfer +
  • \ref subsubsec_datatype_usage_discover +
  • \ref subsubsec_datatype_usage_user +
+\li \ref subsec_datatype_function +\li \ref subsec_datatype_program +
    +
  • \ref subsubsec_datatype_program_discover +
  • \ref subsubsec_datatype_program_define +
+\li \ref subsec_datatype_other +
    +
  • \ref subsubsec_datatype_other_strings +
  • \ref subsubsec_datatype_other_refs +
  • \ref subsubsec_datatype_other_enum +
  • \ref subsubsec_datatype_other_opaque +
  • \ref subsubsec_datatype_other_bitfield +
+\li \ref subsec_datatype_fill +\li \ref subsec_datatype_complex +
    +
  • \ref subsubsec_datatype_complex_create +
  • \ref subsubsec_datatype_complex_analyze +
+\li \ref subsec_datatype_life +\li \ref subsec_datatype_transfer +\li \ref subsec_datatype_text + +\ref sec_dataspace +\li \ref subsec_dataspace_intro +\li \ref subsec_dataspace_function +\li \ref subsec_dataspace_program +
    +
  • \ref subsubsec_dataspace_program_object +
  • \ref subsubsec_dataspace_program_model +
+\li \ref subsec_dataspace_transfer +
    +
  • \ref subsubsec_dataspace_transfer_select +
  • \ref subsubsec_dataspace_transfer_model +
+\li \ref subsec_dataspace_select +\li \ref subsec_dataspace_refer +
    +
  • \ref subsubsec_dataspace_refer_use +
  • \ref subsubsec_dataspace_refer_create +
  • \ref subsubsec_dataspace_refer_read +
+\li \ref subsec_dataspace_sample + +\ref sec_attribute +\li \ref subsec_attribute_intro +\li \ref subsec_attribute_program +
    +
  • To Open and Read or Write an Existing Attribute
  • +
+\li \ref subsec_error_H5A +\li \ref subsec_attribute_work +
    +
  • \ref subsubsec_attribute_work_struct +
  • \ref subsubsec_attribute_work_create +
  • \ref subsubsec_attribute_work_access +
  • \ref subsubsec_attribute_work_info +
  • \ref subsubsec_attribute_work_iterate +
  • \ref subsubsec_attribute_work_delete +
  • \ref subsubsec_attribute_work_close +
+\li \ref subsec_attribute_special + +\ref sec_error +\li \ref subsec_error_intro +\li \ref subsec_error_program +\li \ref subsec_error_H5E +\li \ref subsec_error_ops +
    +
  • \ref subsubsec_error_ops_stack +
  • \ref subsubsec_error_ops_print +
  • \ref subsubsec_error_ops_mute +
  • \ref subsubsec_error_ops_custom_print +
  • \ref subsubsec_error_ops_walk +
  • \ref subsubsec_error_ops_travers +
+\li \ref subsec_error_adv +
    +
  • \ref subsubsec_error_adv_more +
  • \ref subsubsec_error_adv_app +
+ +\ref sec_plist +\li \ref subsec_plist_intro +\li \ref subsec_plist_class +
    +
  • \ref subsubsec_plist_class +
  • \ref subsubsec_plist_lists +
  • \ref subsubsec_plist_props +
+\li \ref subsec_plist_program +
    +
  • \ref subsubsec_plist_default +
  • \ref subsubsec_plist_basic +
  • \ref subsubsec_plist_additional +
+\li \ref subsec_plist_generic +\li \ref subsec_plist_H5P +\li \ref subsec_plist_resources +\li \ref subsec_plist_notes + +\ref sec_vol +\li \ref subsec_vol_intro +\li \ref subsec_vol_abstract_layer +\li \ref subsec_vol_connect +\li \ref subsec_vol_use + +\ref sec_async +\li \ref subsec_async_intro + +\ref sec_map + +\ref sec_addition + +\page AR_UG Additional Resources + +\section sec_addition Additional Resources +These documents provide additional information for the use and tuning of specific HDF5 features. +
-\li \ref H5A "Attributes (H5A)" -\li \ref H5D "Datasets (H5D)" -\li \ref H5S "Dataspaces (H5S)" -\li \ref H5T "Datatypes (H5T)" -\li \ref H5E "Error Handling (H5E)" -\li \ref H5ES "Event Sets (H5ES)" -\li \ref H5F "Files (H5F)" -\li \ref H5Z "Filters (H5Z)" -\li \ref H5G "Groups (H5G)" - -\li \ref H5I "Identifiers (H5I)" -\li \ref H5 "Library General (H5)" -\li \ref H5L "Links (H5L)" -\li \ref H5M "Maps (H5M)" -\li \ref H5O "Objects (H5O)" -\li \ref H5P "Property Lists (H5P)" -\li \ref H5PL "Dynamically-loaded Plugins (H5PL)" -\li \ref H5R "References (H5R)" -\li \ref H5VL "Virtual Object Layer (H5VL)" - -\li \ref high_level -
    -
  • \ref H5LT "Lite (H5LT, H5LD)" -
  • \ref H5IM "Images (H5IM)" -
  • \ref H5TB "Table (H5TB)" -
  • \ref H5PT "Packet Table (H5PT)" -
  • \ref H5DS "Dimension Scale (H5DS)" -
  • \ref H5DO "Optimizations (H5DO)" -
  • \ref H5LR "Extensions (H5LR, H5LT)" -
-
-\a Core \a library: \ref H5 \ref H5A \ref H5D \ref H5E \ref H5ES \ref H5F \ref H5G \ref H5I \ref H5L -\ref H5M \ref H5O \ref H5P \ref H5PL \ref H5R \ref H5S \ref H5T \ref H5VL \ref H5Z -
-\a High-level \a library: \ref H5LT \ref H5IM \ref H5TB \ref H5PT \ref H5DS \ref H5DO \ref H5LR -
+ +\include{doc} high_level_menu.md +
+ +\include{doc} fortran_menu.md +
+ +\include{doc} java_menu.md +
Deprecated functions Functions with \ref ASYNC
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table of Additional resources
+

Document

+
+

Comments

+
+

@ref HDF5Examples

+
+

Code examples by API.

+
+

Chunking in HDF5

+
+

Structuring the use of chunking and tuning it for performance.

+
+

Using the Direct Chunk Write Function

+
+

Describes another way that chunks can be written to datasets.

+
+

Copying Committed Datatypes with H5Ocopy

+
+

Describes how to copy to another file a dataset that uses a committed datatype or an object with an attribute that uses a committed datatype so that the committed datatype in the destination file can be used by multiple objects.

+
+

Metadata Caching in HDF5

+
+

Managing the HDF5 metadata cache and tuning it for performance.

+
+

HDF5 Dynamically Loaded Filters

+
+

Describes how an HDF5 application can apply a filter that is not registered with the HDF5 Library.

+
+

HDF5 File Image Operations

+
+

Describes how to work with HDF5 files in memory. Disk I/O is not required when file images are opened, created, read from, or written to.

+
+

Modified Region Writes

+
+

Describes how to set write operations for in-memory files so that only modified regions are written to storage. Available when the Core (Memory) VFD is used.

+
+

Using Identifiers

+
+

Describes how identifiers behave and how they should be treated.

+
+

Using UTF-8 Encoding in HDF5 Applications

+
+

Describes the use of UTF-8 Unicode character encodings in HDF5 applications.

+
+

Freeing Memory Allocated by the HDF5 Library

+
+

Describes how inconsistent memory management can cause heap corruption or resource leaks and possible solutions.

+
+

HDF5 Glossary

+
+

A glossary of terms.

+
+ +Previous Chapter \ref sec_plist + +\par Don't like what you see? - You can help to improve this User Guide + Complete the survey linked near the top of this page!\n + We treat documentation like code: Fork the + HDF5 repo, make changes, and create a + pull request !\n + +*/ \ No newline at end of file diff --git a/doxygen/dox/ViewTools.dox b/doxygen/dox/ViewTools.dox new file mode 100644 index 0000000..0b685a0 --- /dev/null +++ b/doxygen/dox/ViewTools.dox @@ -0,0 +1,1198 @@ +/** @page ViewTools Tools for Viewing and Editing HDF5 Files + +Navigate back: \ref index "Main" / \ref GettingStarted +
+ +\section secToolsBasic Basic Facts about HDF5 +The following are basic facts about HDF5 files to keep in mind while completing these tutorial topics: +\li All HDF5 files contain a root group "/". +\li There are two primary objects in HDF5, a group and a dataset:
+ Groups allow objects to be organized into a group structure, such as a tree.
+ Datasets contain raw data values. +\li Additional information about an HDF5 object may optionally be stored in attributes attached to the object. + +\section secToolsTopics Tutorial Topics + + + + + + + + + + + + + + + + + +
Tutorial TopicDescription
+@ref LearnHDFView +Use HDFView to create, edit and view files. +
+@ref ViewToolsCommand +Use the HDF5 command-line tools for viewing, editing, and comparing HDF5 files. +
@ref ViewToolsJPSS +Use HDF5 tools to examine and work with JPSS NPP files. +
+ +
+Navigate back: \ref index "Main" / \ref GettingStarted + +@page ViewToolsCommand Command-line Tools +Navigate back: \ref index "Main" / \ref GettingStarted +
+ +\section secViewToolsCommandObtain Obtain Tools and Files (Optional) +Pre-built binaries for Linux and Windows are distributed within the respective HDF5 binary release +packages, which can be obtained from the Download HDF5 page. + +HDF5 files can be obtained from various places such as \ref HDF5Examples and HDF-EOS and Tools and +Information Center. Specifically, the following examples are used in this tutorial topic: +\li HDF5 Files created from compiling the \ref LBExamples +\li HDF5 Files on the Examples by API page +\li NPP JPSS files, SVM01_npp.. (gzipped) +and SVM09_npp.. (gzipped) +\li HDF-EOS OMI-Aura file + +\section secViewToolsCommandTutor Tutorial Topics +A variety of command-line tools are included in the HDF5 binary distribution. There are tools to view, +edit, convert and compare HDF5 files. This tutorial discusses the tools by their functionality. It +does not cover all of the HDF5 tools. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tool CategoryTopicTools Used
@ref ViewToolsView@ref secViewToolsViewContenth5dump and h5ls +
@ref secViewToolsViewDseth5dump and h5ls +
@ref secViewToolsViewGrpsh5dump and h5ls +
@ref secViewToolsViewAttrh5dump +
@ref secViewToolsViewSubh5dump +
@ref secViewToolsViewDtypesh5dump +
@ref ViewToolsEdit@ref secViewToolsEditRemoveh5repack +
@ref secViewToolsEditChangeh5repack +
@ref secViewToolsEditApplyh5repack +
@ref secViewToolsEditCopyh5copy +
@ref secViewToolsEditAddh5jam and h5unjam +
@ref ViewToolsConvert@ref secViewToolsConvertASCIIh5dump +
@ref secViewToolsConvertBinaryh5dump +
@ref secViewToolsConvertExporth5dump and h5import +
+ +
+Navigate back: \ref index "Main" / \ref GettingStarted + +@page ViewToolsView Command-line Tools For Viewing HDF5 Files +Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand +
+ +\section secViewToolsViewTOC Contents +
    +
  • \ref secViewToolsViewContent
  • +
  • \ref secViewToolsViewDset
  • +
  • \ref secViewToolsViewGrps
  • +
  • \ref secViewToolsViewAttr
  • +
  • \ref secViewToolsViewSub
  • +
  • \ref secViewToolsViewDtypes
  • +
+ +\section secViewToolsViewContent File Content and Structure +The h5dump and h5ls tools can both be used to view the contents of an HDF5 file. The tools are discussed below: +
    +
  • \ref subsecViewToolsViewContent_h5dump
  • +
  • \ref subsecViewToolsViewContent_h5ls
  • +
+ +\subsection subsecViewToolsViewContent_h5dump h5dump +The h5dump tool dumps or displays the contents of an HDF5 file (textually). By default if you specify no options, +h5dump will display the entire contents of a file. There are many h5dump options for examining specific details +of a file. To see all of the available h5dump options, specify the -h +or --help option: +\code +h5dump -h +\endcode + +The following h5dump options can be helpful in viewing the content and structure of a file: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionDescriptionComment
-n, --contents +Displays a list of the objects in a file +See @ref subsubsecViewToolsViewContent_h5dumpEx1 +
-n 1, --contents=1 +Displays a list of the objects and attributes in a file +See @ref subsubsecViewToolsViewAttr_h5dumpEx6 +
-H, --header +Displays header information only (no data) +See @ref subsubsecViewToolsViewContent_h5dumpEx2 +
-A 0, --onlyattr=0 +Suppresses the display of attributes +See @ref subsubsecViewToolsViewContent_h5dumpEx2 +
-N P, --any_path=P +Displays any object or attribute that matches path P +See @ref subsubsecViewToolsViewAttr_h5dumpEx6 +
+ +\subsubsection subsubsecViewToolsViewContent_h5dumpEx1 Example 1 +The following command displays a list of the objects in the file OMI-Aura.he5 (an HDF-EOS5 file): +\code +h5dump -n OMI-Aura.he5 +\endcode + +As shown in the output below, the objects (groups, datasets) are listed to the left, followed by their +names. You can see that this file contains two root groups, HDFEOS and HDFEOS INFORMATION: +\code +HDF5 "OMI-Aura.he5" { +FILE_CONTENTS { + group / + group /HDFEOS + group /HDFEOS/ADDITIONAL + group /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES + group /HDFEOS/GRIDS + group /HDFEOS/GRIDS/OMI Column Amount O3 + group /HDFEOS/GRIDS/OMI Column Amount O3/Data Fields + dataset /HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/ColumnAmountO3 + dataset /HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/RadiativeCloudFraction + dataset /HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle + dataset /HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/ViewingZenithAngle + group /HDFEOS INFORMATION + dataset /HDFEOS INFORMATION/StructMetadata.0 + } +} +\endcode + +\subsubsection subsubsecViewToolsViewContent_h5dumpEx2 Example 2 +The file structure of the OMI-Aura.he5 file can be seen with the following command. 
The -A 0 option suppresses the display of attributes: +\code +h5dump -H -A 0 OMI-Aura.he5 +\endcode + +Output of this command is shown below: +\code +HDF5 "OMI-Aura.he5" { +GROUP "/" { + GROUP "HDFEOS" { + GROUP "ADDITIONAL" { + GROUP "FILE_ATTRIBUTES" { + } + } + GROUP "GRIDS" { + GROUP "OMI Column Amount O3" { + GROUP "Data Fields" { + DATASET "ColumnAmountO3" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + DATASET "RadiativeCloudFraction" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + DATASET "SolarZenithAngle" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + DATASET "ViewingZenithAngle" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + } + } + } + } + GROUP "HDFEOS INFORMATION" { + DATASET "StructMetadata.0" { + DATATYPE H5T_STRING { + STRSIZE 32000; + STRPAD H5T_STR_NULLTERM; + CSET H5T_CSET_ASCII; + CTYPE H5T_C_S1; + } + DATASPACE SCALAR + } + } +} +} +\endcode + +\subsection subsecViewToolsViewContent_h5ls h5ls +The h5ls tool by default just displays the objects in the root group. It will not display +items in groups beneath the root group unless specified. Useful h5ls options for viewing +file content and structure are: + + + + + + + + + + + + + + + + +
OptionDescriptionComment
-r +Lists all groups and objects recursively +See @ref subsubsecViewToolsViewContent_h5lsEx3 +
-v +Generates verbose output (lists dataset properties, attributes +and attribute values, but no dataset values) + +
+ +\subsubsection subsubsecViewToolsViewContent_h5lsEx3 Example 3 +The following command shows the contents of the HDF-EOS5 file OMI-Aura.he5. The output is similar to h5dump, except that h5ls also shows dataspace information for each dataset: +\code +h5ls -r OMI-Aura.he5 +\endcode + +The output is shown below: +\code +/ Group +/HDFEOS Group +/HDFEOS/ADDITIONAL Group +/HDFEOS/ADDITIONAL/FILE_ATTRIBUTES Group +/HDFEOS/GRIDS Group +/HDFEOS/GRIDS/OMI\ Column\ Amount\ O3 Group +/HDFEOS/GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields Group +/HDFEOS/GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/ColumnAmountO3 Dataset {720, 1440} +/HDFEOS/GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/RadiativeCloudFraction Dataset {720, 1440} +/HDFEOS/GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/SolarZenithAngle Dataset {720, 1440} +/HDFEOS/GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/ViewingZenithAngle Dataset {720, 1440} +/HDFEOS\ INFORMATION Group +/HDFEOS\ INFORMATION/StructMetadata.0 Dataset {SCALAR} +\endcode + +\section secViewToolsViewDset Datasets and Dataset Properties +Both h5dump and h5ls can be used to view specific datasets. +
    +
  • \ref subsecViewToolsViewDset_h5dump
  • +
  • \ref subsecViewToolsViewDset_h5ls
  • +
+ +\subsection subsecViewToolsViewDset_h5dump h5dump +Useful h5dump options for examining specific datasets include: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionDescriptionComment
-d D, --dataset=D +Displays dataset D +See @ref subsubsecViewToolsViewDset_h5dumpEx4 +
-H, --header +Displays header information only +See @ref subsubsecViewToolsViewDset_h5dumpEx4 +
-p, --properties +Displays dataset filters, storage layout, and fill value properties +See @ref subsubsecViewToolsViewDset_h5dumpEx5 +
-A 0, --onlyattr=0 +Suppresses the display of attributes +See @ref subsubsecViewToolsViewContent_h5dumpEx2 +
-N P, --any_path=P +Displays any object or attribute that matches path P +See @ref subsubsecViewToolsViewAttr_h5dumpEx6 +
+ +\subsubsection subsubsecViewToolsViewDset_h5dumpEx4 Example 4 +A specific dataset can be viewed with h5dump using the -d D option and specifying the entire +path and name of the dataset for D. The path is important in identifying the correct dataset, +as there can be multiple datasets with the same name. The path can be determined by looking at +the objects in the file with h5dump -n. + +The following example uses the groups.h5 file that is created by the +\ref LBExamples +example h5_crtgrpar.c. To display dset1 in the groups.h5 file below, specify dataset +/MyGroup/dset1. The -H option is used to suppress printing of the data values: + +Contents of groups.h5 +\code + $ h5dump -n groups.h5 + HDF5 "groups.h5" { + FILE_CONTENTS { + group / + group /MyGroup + group /MyGroup/Group_A + dataset /MyGroup/Group_A/dset2 + group /MyGroup/Group_B + dataset /MyGroup/dset1 + } + } +\endcode + +Display dataset "dset1" +\code + $ h5dump -d "/MyGroup/dset1" -H groups.h5 + HDF5 "groups.h5" { + DATASET "/MyGroup/dset1" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 3, 3 ) / ( 3, 3 ) } + } + } +\endcode + +\subsubsection subsubsecViewToolsViewDset_h5dumpEx5 Example 5 +The -p option is used to examine the dataset filters, storage layout, and fill value properties of a dataset. + +This option can be useful for checking how well compression works, or even for analyzing performance +and dataset size issues related to chunking. (The smaller the chunk size, the more chunks that HDF5 +has to keep track of, which increases the size of the file and potentially affects performance.) 
+ +In the file shown below the dataset /DS1 is both chunked and compressed: +\code + $ h5dump -H -p -d "/DS1" h5ex_d_gzip.h5 + HDF5 "h5ex_d_gzip.h5" { + DATASET "/DS1" { + DATATYPE H5T_STD_I32LE + DATASPACE SIMPLE { ( 32, 64 ) / ( 32, 64 ) } + STORAGE_LAYOUT { + CHUNKED ( 4, 8 ) + SIZE 5278 (1.552:1 COMPRESSION) + } + FILTERS { + COMPRESSION DEFLATE { LEVEL 9 } + } + FILLVALUE { + FILL_TIME H5D_FILL_TIME_IFSET + VALUE 0 + } + ALLOCATION_TIME { + H5D_ALLOC_TIME_INCR + } + } + } +\endcode + +You can obtain the h5ex_d_gzip.c program that created this file, as well as the file created, +from the Examples by API page. + +\subsection subsecViewToolsViewDset_h5ls h5ls +Specific datasets can be specified with h5ls by simply adding the dataset path and dataset after the +file name. As an example, this command displays dataset dset2 in the groups.h5 +file used in @ref subsubsecViewToolsViewDset_h5dumpEx4 : +\code +h5ls groups.h5/MyGroup/Group_A/dset2 +\endcode + +Just the dataspace information gets displayed: +\code +dset2 Dataset {2, 10} +\endcode + +The following options can be used to see detailed information about a dataset. + + + + + + + + + + + + + +
OptionDescription
-v, --verbose +Generates verbose output (lists dataset properties, attributes +and attribute values, but no dataset values) +
-d, --data +Displays dataset values +
+ +The output of using -v is shown below: +\code + $ h5ls -v groups.h5/MyGroup/Group_A/dset2 + Opened "groups.h5" with sec2 driver. + dset2 Dataset {2/2, 10/10} + Location: 1:3840 + Links: 1 + Storage: 80 logical bytes, 80 allocated bytes, 100.00% utilization + Type: 32-bit big-endian integer +\endcode + +The output of using -d is shown below: +\code + $ h5ls -d groups.h5/MyGroup/Group_A/dset2 + dset2 Dataset {2, 10} + Data: + (0,0) 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 +\endcode + +\section secViewToolsViewGrps Groups +Both h5dump and h5ls can be used to view specific groups in a file. +
    +
  • \ref subsecViewToolsViewGrps_h5dump
  • +
  • \ref subsecViewToolsViewGrps_h5ls
  • +
+ +\subsection subsecViewToolsViewGrps_h5dump h5dump +The h5dump options that are useful for examining groups are: + + + + + + + + + + + + + + + + + +
OptionDescription
-g G, --group=G +Displays group G and its members +
-H, --header +Displays header information only +
-A 0, --onlyattr=0 +Suppresses the display of attributes +
+ +To view the contents of the HDFEOS group in the OMI file mentioned previously, you can specify the path and name of the group as follows: +\code +h5dump -g "/HDFEOS" -H -A 0 OMI-Aura.he5 +\endcode + +The -A 0 option suppresses attributes and -H suppresses printing of data values: +\code + HDF5 "OMI-Aura.he5" { + GROUP "/HDFEOS" { + GROUP "ADDITIONAL" { + GROUP "FILE_ATTRIBUTES" { + } + } + GROUP "GRIDS" { + GROUP "OMI Column Amount O3" { + GROUP "Data Fields" { + DATASET "ColumnAmountO3" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + DATASET "RadiativeCloudFraction" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + DATASET "SolarZenithAngle" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + DATASET "ViewingZenithAngle" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + } + } + } + } + } + } +\endcode + +\subsection subsecViewToolsViewGrps_h5ls h5ls +You can view the contents of a group with h5ls by specifying the group after the file name. +To use h5ls to view the contents of the /HDFEOS group in the OMI-Aura.he5 file, type: +\code +h5ls -r OMI-Aura.he5/HDFEOS +\endcode + +The output of this command is: +\code + /ADDITIONAL Group + /ADDITIONAL/FILE_ATTRIBUTES Group + /GRIDS Group + /GRIDS/OMI\ Column\ Amount\ O3 Group + /GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields Group + /GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/ColumnAmountO3 Dataset {720, 1440} + /GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/RadiativeCloudFraction Dataset {720, 1440} + /GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/SolarZenithAngle Dataset {720, 1440} + /GRIDS/OMI\ Column\ Amount\ O3/Data\ Fields/ViewingZenithAngle Dataset {720, 1440} +\endcode + +If you specify the -v option, you can also see the attributes and properties of the datasets. 
+ +\section secViewToolsViewAttr Attributes + +\subsection subsecViewToolsViewAttr_h5dump h5dump +Attributes are displayed by default if using h5dump. Some files contain many attributes, which +can make it difficult to examine the objects in the file. Shown below are options that can help +when using h5dump to work with files that have attributes. + +\subsubsection subsubsecViewToolsViewAttr_h5dumpEx6 Example 6 +The -a A option will display an attribute. However, the path to the attribute must be included +when specifying this option. For example, to see the ScaleFactor attribute in the OMI-Aura.he5 file, type: +\code +h5dump -a "/HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle/ScaleFactor" OMI-Aura.he5 +\endcode + +This command displays: +\code + HDF5 "OMI-Aura.he5" { + ATTRIBUTE "ScaleFactor" { + DATATYPE H5T_IEEE_F64LE + DATASPACE SIMPLE { ( 1 ) / ( 1 ) } + DATA { + (0): 1 + } + } + } +\endcode + +How can you determine the path to the attribute? This can be done by looking at the file contents with the -n 1 option: +\code +h5dump -n 1 OMI-Aura.he5 +\endcode + +Below is a portion of the output for this command: +\code + HDF5 "OMI-Aura.he5" { + FILE_CONTENTS { + group / + group /HDFEOS + group /HDFEOS/ADDITIONAL + group /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/EndUTC + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/GranuleDay + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/GranuleDayOfYear + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/GranuleMonth + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/GranuleYear + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/InstrumentName + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/OrbitNumber + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/OrbitPeriod + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/PGEVersion + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/Period + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/ProcessLevel + attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/StartUTC + 
attribute /HDFEOS/ADDITIONAL/FILE_ATTRIBUTES/TAI93At0zOfGranule + + ... +\endcode + +There can be multiple objects or attributes with the same name in a file. How can you make sure +you are finding the correct object or attribute? You can first determine how many attributes +there are with a specified name, and then examine the paths to them. + +The -N option can be used to display all objects or attributes with a given name. +For example, there are four attributes with the name ScaleFactor in the OMI-Aura.he5 file, +as can be seen below with the -N option: +\code +h5dump -N ScaleFactor OMI-Aura.he5 +\endcode + +It outputs: +\code +HDF5 "OMI-Aura.he5" { +ATTRIBUTE "ScaleFactor" { + DATATYPE H5T_IEEE_F64LE + DATASPACE SIMPLE { ( 1 ) / ( 1 ) } + DATA { + (0): 1 + } +} +ATTRIBUTE "ScaleFactor" { + DATATYPE H5T_IEEE_F64LE + DATASPACE SIMPLE { ( 1 ) / ( 1 ) } + DATA { + (0): 1 + } +} +ATTRIBUTE "ScaleFactor" { + DATATYPE H5T_IEEE_F64LE + DATASPACE SIMPLE { ( 1 ) / ( 1 ) } + DATA { + (0): 1 + } +} +ATTRIBUTE "ScaleFactor" { + DATATYPE H5T_IEEE_F64LE + DATASPACE SIMPLE { ( 1 ) / ( 1 ) } + DATA { + (0): 1 + } +} +} +\endcode + +\subsection subsecViewToolsViewAttr_h5ls h5ls +If you include the -v (verbose) option for h5ls, you will see all of the attributes for the +specified file, dataset or group. You cannot display individual attributes. + +\section secViewToolsViewSub Dataset Subset + +\subsection subsecViewToolsViewSub_h5dump h5dump +If you have a very large dataset, you may wish to subset or see just a portion of the dataset. +This can be done with the following h5dump options. + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionDescription
-d D, --dataset=D +Dataset D +
-s START, --start=START +Offset or start of subsetting selection +
-S STRIDE, --stride=STRIDE +Stride (sampling along a dimension). The default (unspecified, or 1) selects +every element along a dimension, a value of 2 selects every other element, +a value of 3 selects every third element, ... +
-c COUNT, --count=COUNT +Number of blocks to include in the selection +
-k BLOCK, --block=BLOCK +Size of the block in a hyperslab. The default (unspecified, or 1) is for +the block size to be the size of a single element. +
+ +The START (s), STRIDE (S), COUNT (c), and BLOCK (k) options +define the shape and size of the selection. They are arrays with the same number of dimensions as the rank +of the dataset's dataspace, and they all work together to define the selection. A change to one of +these arrays can affect the others. + +When specifying these h5dump options, a comma is used as the delimiter for each dimension in the +option value. For example, with a 2-dimensional dataset, the option value is specified as "H,W", +where H is the height and W is the width. If the offset is 0 for both dimensions, then +START would be specified as follows: +\code +-s "0,0" +\endcode + +There is also a shorthand way to specify these options with brackets at the end of the dataset name: +\code +-d DATASETNAME[s;S;c;k] +\endcode + +Multiple dimensions are separated by commas. For example, a subset for a 2-dimensional dataset would be specified as follows: +\code +-d DATASETNAME[s,s;S,S;c,c;k,k] +\endcode + +For a detailed understanding of how selections work, see the #H5Sselect_hyperslab API in the \ref RM. + +The dataset SolarZenithAngle in the OMI-Aura.he5 file can be used to illustrate these options. This +dataset is a 2-dimensional dataset of size 720 (height) x 1440 (width). Too much data will be displayed +by simply viewing the specified dataset with the -d option: +\code +h5dump -d "HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle" OMI-Aura.he5 +\endcode +Subsetting narrows down the output that is displayed. 
In the following example, the first +15x10 elements (-c "15,10") are specified, beginning with position (0,0) (-s "0,0"): +\code + h5dump -A 0 -d "HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle" -s "0,0" -c "15,10" -w 0 OMI-Aura.he5 +\endcode + +If using the shorthand method, specify: +\code + h5dump -A 0 -d "HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle[0,0;;15,10;]" -w 0 OMI-Aura.he5 +\endcode + +Where, +\par The -d option must be specified + +before +\par subsetting options (if not using the shorthand method). + +The -A 0 option suppresses the printing of attributes. + +The -w 0 option sets the number of columns of output to the maximum allowed value (65535). +This ensures that there are enough columns specified for displaying the data. + +Either command displays: +\code + HDF5 "OMI-Aura.he5" { + DATASET "HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + SUBSET { + START ( 0, 0 ); + STRIDE ( 1, 1 ); + COUNT ( 15, 10 ); + BLOCK ( 1, 1 ); + DATA { + (0,0): 79.403, 79.403, 79.403, 79.403, 79.403, 79.403, 79.403, 79.403, 79.403, 79.403, + (1,0): 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, + (2,0): 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, + (3,0): 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, + (4,0): 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, + (5,0): 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, + (6,0): 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, + (7,0): 77.715, 77.715, 77.715, 77.715, 77.715, 77.715, 77.715, 77.715, 77.715, 77.715, + (8,0): 77.511, 77.511, 77.511, 77.511, 77.511, 77.511, 77.511, 77.511, 77.511, 77.511, + (9,0): 77.658, 77.658, 77.658, 77.307, 77.307, 77.307, 77.307, 77.307, 77.307, 77.307, + (10,0): 
77.556, 77.556, 77.556, 77.556, 77.556, 77.556, 77.556, 77.556, 77.102, 77.102, + (11,0): 78.408, 78.408, 78.408, 78.408, 78.408, 78.408, 78.408, 78.408, 77.102, 77.102, + (12,0): 76.34, 78.413, 78.413, 78.413, 78.413, 78.413, 78.413, 78.413, 78.413, 78.413, + (13,0): 78.107, 78.107, 78.107, 78.107, 78.107, 78.107, 78.107, 78.107, 78.107, 77.195, + (14,0): 78.005, 78.005, 78.005, 78.005, 78.005, 78.005, 76.991, 76.991, 76.991, 76.991 + } + } + } + } +\endcode + +What if we wish to read three rows of three elements at a time (-c "3,3"), where each element +is a 2 x 3 block (-k "2,3") and we wish to begin reading from the second row (-s "1,0")? + +You can do that with the following command: +\code + h5dump -A 0 -d "HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle" + -s "1,0" -S "2,3" -c "3,3" -k "2,3" -w 0 OMI-Aura.he5 +\endcode + +In this case, the stride must be specified as 2 by 3 (or larger) to accommodate the reading of 2 by 3 blocks. +If it is smaller, the command will fail with the error, +\code +h5dump error: wrong subset selection; blocks overlap. 
+\endcode + +The output of the above command is shown below: +\code + HDF5 "OMI-Aura.he5" { + DATASET "HDFEOS/GRIDS/OMI Column Amount O3/Data Fields/SolarZenithAngle" { + DATATYPE H5T_IEEE_F32LE + DATASPACE SIMPLE { ( 720, 1440 ) / ( 720, 1440 ) } + SUBSET { + START ( 1, 0 ); + STRIDE ( 2, 3 ); + COUNT ( 3, 3 ); + BLOCK ( 2, 3 ); + DATA { + (1,0): 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, 79.071, + (2,0): 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, 78.867, + (3,0): 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, 78.632, + (4,0): 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, 78.429, + (5,0): 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, 78.225, + (6,0): 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, 78.021, 78.021 + } + } + } + } +\endcode + +\section secViewToolsViewDtypes Datatypes + +\subsection subsecViewToolsViewDtypes_h5dump h5dump +The following datatypes are discussed, using the output of h5dump with HDF5 files from the +Examples by API page: +
    +
  • @ref subsubsecViewToolsViewDtypes_array
  • +
  • @ref subsubsecViewToolsViewDtypes_objref
  • +
  • @ref subsubsecViewToolsViewDtypes_regref
  • +
  • @ref subsubsecViewToolsViewDtypes_string
  • +
+ +\subsubsection subsubsecViewToolsViewDtypes_array Array +Users have been confused by the difference between an Array datatype (#H5T_ARRAY) and a dataset that +(has a dataspace that) is an array. + +Typically, these users want a dataset that has a simple datatype (like integer or float) that is an +array, like the following dataset /DS1. It has a datatype of #H5T_STD_I32LE (32-bit Little-Endian Integer) +and is a 4 by 7 array: +\code +$ h5dump h5ex_d_rdwr.h5 +HDF5 "h5ex_d_rdwr.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_STD_I32LE + DATASPACE SIMPLE { ( 4, 7 ) / ( 4, 7 ) } + DATA { + (0,0): 0, -1, -2, -3, -4, -5, -6, + (1,0): 0, 0, 0, 0, 0, 0, 0, + (2,0): 0, 1, 2, 3, 4, 5, 6, + (3,0): 0, 2, 4, 6, 8, 10, 12 + } + } +} +} +\endcode + +Contrast that with the following dataset that has both an Array datatype and is an array: +\code +$ h5dump h5ex_t_array.h5 +HDF5 "h5ex_t_array.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_ARRAY { [3][5] H5T_STD_I64LE } + DATASPACE SIMPLE { ( 4 ) / ( 4 ) } + DATA { + (0): [ 0, 0, 0, 0, 0, + 0, -1, -2, -3, -4, + 0, -2, -4, -6, -8 ], + (1): [ 0, 1, 2, 3, 4, + 1, 1, 1, 1, 1, + 2, 1, 0, -1, -2 ], + (2): [ 0, 2, 4, 6, 8, + 2, 3, 4, 5, 6, + 4, 4, 4, 4, 4 ], + (3): [ 0, 3, 6, 9, 12, + 3, 5, 7, 9, 11, + 6, 7, 8, 9, 10 ] + } + } +} +} +\endcode + +In this file, dataset /DS1 has a datatype of +\code +H5T_ARRAY { [3][5] H5T_STD_I64LE } +\endcode +and it also has a dataspace of +\code +SIMPLE { ( 4 ) / ( 4 ) } +\endcode +In other words, it is an array of four elements, in which each element is a 3 by 5 array of #H5T_STD_I64LE. + +This dataset is much more complex. Also note that subsetting cannot be done on Array datatypes. + +See this FAQ for more information on the Array datatype. + +\subsubsection subsubsecViewToolsViewDtypes_objref Object Reference +An Object Reference is a reference to an entire object (dataset, group, or named datatype). +A dataset with an Object Reference datatype consists of one or more Object References. 
+An Object Reference dataset can be used as an index to an HDF5 file. + +The /DS1 dataset in the following file (h5ex_t_objref.h5) is an Object Reference dataset. +It contains two references, one to group /G1 and the other to dataset /DS2: +\code +$ h5dump h5ex_t_objref.h5 +HDF5 "h5ex_t_objref.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_REFERENCE { H5T_STD_REF_OBJECT } + DATASPACE SIMPLE { ( 2 ) / ( 2 ) } + DATA { + (0): GROUP 1400 /G1 , DATASET 800 /DS2 + } + } + DATASET "DS2" { + DATATYPE H5T_STD_I32LE + DATASPACE NULL + DATA { + } + } + GROUP "G1" { + } +} +} +\endcode + +\subsubsection subsubsecViewToolsViewDtypes_regref Region Reference +A Region Reference is a reference to a selection within a dataset. A selection can be either +individual elements or a hyperslab. In h5dump you will see the name of the dataset along with +the elements or slab that is selected. A dataset with a Region Reference datatype consists of +one or more Region References. + +An example of a Region Reference dataset (h5ex_t_regref.h5) can be found on the +Examples by API page, +under Datatypes. 
If you examine this dataset with h5dump you will see that /DS1 is a +Region Reference dataset as indicated by its datatype, highlighted in bold below: +\code +$ h5dump h5ex_t_regref.h5 +HDF5 "h5ex_t_regref.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_REFERENCE { H5T_STD_REF_DSETREG } + DATASPACE SIMPLE { ( 2 ) / ( 2 ) } + DATA { + DATASET /DS2 {(0,1), (2,11), (1,0), (2,4)}, + DATASET /DS2 {(0,0)-(0,2), (0,11)-(0,13), (2,0)-(2,2), (2,11)-(2,13)} + } + } + DATASET "DS2" { + DATATYPE H5T_STD_I8LE + DATASPACE SIMPLE { ( 3, 16 ) / ( 3, 16 ) } + DATA { + (0,0): 84, 104, 101, 32, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, + (0,14): 110, 0, + (1,0): 102, 111, 120, 32, 106, 117, 109, 112, 115, 32, 111, 118, 101, + (1,13): 114, 32, 0, + (2,0): 116, 104, 101, 32, 53, 32, 108, 97, 122, 121, 32, 100, 111, 103, + (2,14): 115, 0 + } + } +} +} +\endcode + +It contains two Region References: +\li A selection of four individual elements in dataset /DS2 : (0,1), (2,11), (1,0), (2,4) +See the #H5Sselect_elements API in the \ref UG for information on selecting individual elements. +\li A selection of these blocks in dataset /DS2 : (0,0)-(0,2), (0,11)-(0,13), (2,0)-(2,2), (2,11)-(2,13) +See the #H5Sselect_hyperslab API in the \ref UG for how to do hyperslab selection. 
+ + +If you look at the code that creates the dataset (h5ex_t_regref.c) you will see that the +first reference is created with these calls: +\code + status = H5Sselect_elements (space, H5S_SELECT_SET, 4, coords[0]); + status = H5Rcreate (&wdata[0], file, DATASET2, H5R_DATASET_REGION, space); +\endcode + +where the buffer containing the coordinates to select is: +\code + coords[4][2] = { {0, 1}, + {2, 11}, + {1, 0}, + {2, 4} }, +\endcode + +The second reference is created by calling, +\code + status = H5Sselect_hyperslab (space, H5S_SELECT_SET, start, stride, count, block); + status = H5Rcreate (&wdata[1], file, DATASET2, H5R_DATASET_REGION, space); +\endcode +where start, stride, count, and block have these values: +\code + start[2] = {0, 0}, + stride[2] = {2, 11}, + count[2] = {2, 2}, + block[2] = {1, 3}; +\endcode + +These start, stride, count, and block values will select the elements shown in bold in the dataset: +\code +84 104 101 32 113 117 105 99 107 32 98 114 111 119 110 0 +102 111 120 32 106 117 109 112 115 32 111 118 101 114 32 0 +116 104 101 32 53 32 108 97 122 121 32 100 111 103 115 0 +\endcode + +If you use h5dump to select a subset of dataset +/DS2 with these start, stride, count, and block values, you will see that the same elements are selected: +\code +$ h5dump -d "/DS2" -s "0,0" -S "2,11" -c "2,2" -k "1,3" h5ex_t_regref.h5 +HDF5 "h5ex_t_regref.h5" { +DATASET "/DS2" { + DATATYPE H5T_STD_I8LE + DATASPACE SIMPLE { ( 3, 16 ) / ( 3, 16 ) } + SUBSET { + START ( 0, 0 ); + STRIDE ( 2, 11 ); + COUNT ( 2, 2 ); + BLOCK ( 1, 3 ); + DATA { + (0,0): 84, 104, 101, 114, 111, 119, + (2,0): 116, 104, 101, 100, 111, 103 + } + } +} +} +\endcode + +For more information on selections, see the tutorial topic on +@ref LBDsetSubRW. Also see the +\ref secViewToolsViewSub tutorial topic on using h5dump to view a subset. + +\subsubsection subsubsecViewToolsViewDtypes_string String +There are two types of string data, fixed length strings and variable length strings. 
+ +Below is the h5dump output for two files that have the same strings written to them. In one file, +the strings are fixed in length, and in the other, the strings have different sizes (and are variable in size). + +Dataset of Fixed Length Strings +\code +HDF5 "h5ex_t_string.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_STRING { + STRSIZE 7; + STRPAD H5T_STR_SPACEPAD; + CSET H5T_CSET_ASCII; + CTYPE H5T_C_S1; + } + DATASPACE SIMPLE { ( 4 ) / ( 4 ) } + DATA { + (0): "Parting", "is such", "sweet ", "sorrow." + } + } +} +} +\endcode + +Dataset of Variable Length Strings +\code +HDF5 "h5ex_t_vlstring.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_STRING { + STRSIZE H5T_VARIABLE; + STRPAD H5T_STR_SPACEPAD; + CSET H5T_CSET_ASCII; + CTYPE H5T_C_S1; + } + DATASPACE SIMPLE { ( 4 ) / ( 4 ) } + DATA { + (0): "Parting", "is such", "sweet", "sorrow." + } + } +} +} +\endcode + +You might wonder which to use. Some comments to consider are included below. +\li In general, a variable length string dataset is more complex than a fixed length string. If you don't +specifically need a variable length type, then just use the fixed length string. +\li A variable length dataset consists of pointers to heaps in different locations in the file. For this +reason, a variable length dataset cannot be compressed. (Basically, the pointers get compressed and +not the actual data!) If compression is needed, then do not use variable length types. +\li If you need to create an array of different length strings, you can either use fixed length strings +along with compression, or use a variable length string. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand + +*/ diff --git a/doxygen/dox/ViewTools2.dox b/doxygen/dox/ViewTools2.dox new file mode 100644 index 0000000..4d8788a --- /dev/null +++ b/doxygen/dox/ViewTools2.dox @@ -0,0 +1,786 @@ +/** @page ViewToolsEdit Command-line Tools For Editing HDF5 Files +Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand +
+ +\section secViewToolsEditTOC Contents +
    +
  • \ref secViewToolsEditRemove
  • +
  • \ref secViewToolsEditChange
  • +
  • \ref secViewToolsEditApply
  • +
  • \ref secViewToolsEditCopy
  • +
  • \ref secViewToolsEditAdd
  • +
+ +\section secViewToolsEditRemove Remove Inaccessible Objects and Unused Space in a File +HDF5 files may accumulate unused space when they are read and rewritten to or if objects are deleted within +them. With many edits and deletions this unused space can add up to a sizable amount. + +The h5repack tool can be used to remove unused space in an HDF5 +file. If no options other than the input and output HDF5 files are specified on the +h5repack command line, it will write the file to the new +file, getting rid of the unused space: +\code +h5repack +\endcode + +\section secViewToolsEditChange Change a Dataset's Storage Layout +The h5repack utility can be used to change a dataset's storage +layout. By default, the storage layout of a dataset is defined at creation time and it cannot be changed. +However, with h5repack you can write an HDF5 file to a new file and change the layout for objects in the new file. + +The -l option in h5repack +is used to change the layout for an object. The string following the -l +option defines the layout type and parameters for specified objects (or all objects): +\code +h5repack -l [list of objects:]= +\endcode + +If no object is specified, then everything in the input file will be written to the output file with the specified +layout type and parameters. If objects are specified then everything in the input file will be written to the +output file as is, except for those specified objects. They will be written to the output file with the given +layout type and parameters. + +Following is a description of the dataset layouts and the h5repack +options to use to change a dataset: + + + + + + + + + + + + + + + + + + + +
Storage Layouth5repack OptionDescription
Contiguous +CONTI +Data is stored physically together +
Chunked +CHUNK=DIM[xDIM...xDIM] +Data is stored in DIM[xDIM...xDIM] chunks +
Compact +COMPA +Data is stored in the header of the object (less I/O) +
+ +If you type h5repack -h on the command line, you will see +a detailed usage statement with examples of modifying the layout. + +In the following example, the dataset /dset in the file +dset.h5 is contiguous, as shown by the h5dump -pH command. +The h5repack utility writes dset.h5 to a new file, dsetrpk.h5, +where the dataset dset is chunked. This can be seen by examining +the resulting dsetrpk.h5 file with h5dump, as shown: +\code +$ h5dump -pH dset.h5 +HDF5 "dset.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + STORAGE_LAYOUT { + CONTIGUOUS + SIZE 96 + OFFSET 1400 + } + FILTERS { + NONE + } + FILLVALUE { + FILL_TIME H5D_FILL_TIME_IFSET + VALUE 0 + } + ALLOCATION_TIME { + H5D_ALLOC_TIME_LATE + } + } +} +} + +$ h5repack -l dset:CHUNK=4x6 dset.h5 dsetrpk.h5 + +$ h5dump -pH dsetrpk.h5 +HDF5 "dsetrpk.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + STORAGE_LAYOUT { + CHUNKED ( 4, 6 ) + SIZE 96 + } + FILTERS { + NONE + } + FILLVALUE { + FILL_TIME H5D_FILL_TIME_IFSET + VALUE 0 + } + ALLOCATION_TIME { + H5D_ALLOC_TIME_INCR + } + } +} +} +\endcode + +There can be many reasons that the storage layout needs to be changed for a dataset. For example, +there may be a performance issue with a dataset due to a small chunk size. + +\section secViewToolsEditApply Apply Compression Filter to a Dataset +The h5repack utility can be used to compress or +remove compression from a dataset in a file. By default, compression cannot be added to or removed +from a dataset once it has been created. However, with h5repack +you can write a file to a new file and specify a compression filter to apply to a dataset or datasets in the new file. 
+ +To apply a filter to an object in an HDF5 file, specify the -f option, +where the string following the -f option defines the filter and +its parameters (if there are any) to apply to a given object or objects: +\code +h5repack -f [list of objects:]= +\endcode + +If no objects are specified then everything in the input file will be written to the output file with +the filter and parameters specified. If objects are specified, then everything in the input file will +be written to the output file as is, except for the specified objects. They will be written to the +output file with the filter and parameters specified. + +If you type h5repack --help on the command line, +you will see a detailed usage statement with examples of modifying a filter. There are actually +numerous filters that you can apply to a dataset: + + +Options + + + + + + + + + + + + + + + + + + + + + + + + + + +
Filter
GZIP compression (levels 1-9) +GZIP=<deflation level> +
SZIP compression +SZIP=&lt;pixels per block,coding&gt; +
Shuffle filter +SHUF +
Checksum filter +FLET +
NBIT compression +NBIT +
HDF5 Scale/Offset filter +SOFF=&lt;scale_factor,scale_type&gt; +
User defined filter +UD=&lt;filter_number,filter_flag,cd_value_count,value_1[,value_2,...]&gt; +
Remove ALL filters +NONE +
+ +Be aware that a dataset must be chunked to apply compression to it. If the dataset is not already chunked, +then h5repack will apply chunking to it. Both chunking +and compression cannot be applied to a dataset at the same time with h5repack. + +In the following example, +\li h5dump lists the properties for the objects in dset.h5. Note that the dataset dset is contiguous. +\li h5repack writes dset.h5 into a new file dsetrpk.h5, applying GZIP Level 5 compression to the dataset /dset in dsetrpk.h5. +\li h5dump lists the properties for the new dsetrpk.h5 file. Note that /dset is both compressed and chunked. + +Example +\code +$ h5dump -pH dset.h5 +HDF5 "dset.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 12, 18 ) / ( 12, 18 ) } + STORAGE_LAYOUT { + CONTIGUOUS + SIZE 864 + OFFSET 1400 + } + FILTERS { + NONE + } + FILLVALUE { + FILL_TIME H5D_FILL_TIME_IFSET + VALUE 0 + } + ALLOCATION_TIME { + H5D_ALLOC_TIME_LATE + } + } +} +} + +$ h5repack -f dset:GZIP=5 dset.h5 dsetrpk.h5 + +$ h5dump -pH dsetrpk.h5 +HDF5 "dsetrpk.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 12, 18 ) / ( 12, 18 ) } + STORAGE_LAYOUT { + CHUNKED ( 12, 18 ) + SIZE 160 (5.400:1 COMPRESSION) + } + FILTERS { + COMPRESSION DEFLATE { LEVEL 5 } + } + FILLVALUE { + FILL_TIME H5D_FILL_TIME_IFSET + VALUE 0 + } + ALLOCATION_TIME { + H5D_ALLOC_TIME_INCR + } + } +} +} +\endcode + +\section secViewToolsEditCopy Copy Objects to Another File +The h5copy utility can be used to copy an object or +objects from one HDF5 file to another or to a different location in the same file. It uses the +#H5Ocopy and #H5Lcopy APIs in HDF5. + +Following are some of the options that can be used with h5copy. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
h5copy OptionsDescription
-i, --input +Input file name +
-o, --output +Output file name +
-s, --source +Source object name +
-d, --destination +Destination object name +
-p, --parents +Make parent groups as needed +
-v, --verbose +Verbose mode +
-f, --flag +Flag type +
+ +For a complete list of options and information on using h5copy, type: +\code +h5copy --help +\endcode + +In the example below, the dataset /MyGroup/Group_A/dset2 +in groups.h5 gets copied to the root +("/") group of a new file, +newgroup.h5, with the name +dset3: +\code +$h5dump -H groups.h5 +HDF5 "groups.h5" { +GROUP "/" { + GROUP "MyGroup" { + GROUP "Group_A" { + DATASET "dset2" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 2, 10 ) / ( 2, 10 ) } + } + } + GROUP "Group_B" { + } + DATASET "dset1" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 3, 3 ) / ( 3, 3 ) } + } + } +} +} + +$ h5copy -i groups.h5 -o newgroup.h5 -s /MyGroup/Group_A/dset2 -d /dset3 + +$ h5dump -H newgroup.h5 +HDF5 "newgroup.h5" { +GROUP "/" { + DATASET "dset3" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 2, 10 ) / ( 2, 10 ) } + } +} +} +\endcode + +There are also h5copy flags that can be specified +with the -f option. In the example below, the +-f shallow option specifies to copy only the +immediate members of the group /MyGroup from +the groups.h5 file mentioned above to a new +file mygrouponly.h5: +\code +h5copy -v -i groups.h5 -o mygrouponly.h5 -s /MyGroup -d /MyGroup -f shallow +\endcode + +The output of the above command is shown below. The verbose option -v +describes the action that was taken, as shown in the highlighted text. +\code +Copying file and object to file and object +Using shallow flag + +$ h5dump -H mygrouponly.h5 +HDF5 "mygrouponly.h5" { +GROUP "/" { + GROUP "MyGroup" { + GROUP "Group_A" { + } + GROUP "Group_B" { + } + DATASET "dset1" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 3, 3 ) / ( 3, 3 ) } + } + } +} +} +\endcode + +\section secViewToolsEditAdd Add or Remove User Block from File +The user block is a space in an HDF5 file that is not interpreted by the HDF5 library. It is a property +list that can be added when creating a file. See the #H5Pset_userblock API in the \ref RM for more +information regarding this property. 
+ +Once created in a file, the user block cannot be removed. However, you can use the +h5jam and h5unjam +utilities to add or remove a user block from a file into a new file. + +These two utilities work similarly, except that h5jam +adds a user block to a file and h5unjam removes the user +block. You can also overwrite or delete a user block in a file. + +Specify the -h option to see a complete list of options +that can be used with h5jam and +h5unjam. For example: +\code + h5jam -h + h5unjam -h +\endcode + +Below are the basic options for adding or removing a user block with h5jam +and h5unjam: + + + + + + + + + + + + + + + + + +
h5jam/h5unjam OptionsDescription
-i +Input File +
-o +Output File +
-u +File to add or remove from user block +
+ +Let's say you wanted to add the program that creates an HDF5 file to its user block. As an example, you +can take the h5_crtgrpar.c program from the +\ref LBExamples +and add it to the file it creates, groups.h5. This can +be done with h5jam, as follows: +\code +h5jam -i groups.h5 -u h5_crtgrpar.c -o groupsub.h5 +\endcode + +You can actually view the file with more groupsub.h5 +to see that the h5_crtgrpar.c file is indeed included. + +To remove the user block that was just added, type: +\code +h5unjam -i groupsub.h5 -u h5_crtgrparNEW.c -o groups-noub.h5 +\endcode + +This writes the user block in the file groupsub.h5 +into h5_crtgrparNEW.c. The new HDF5 file, +groups-noub.h5, will not contain a user block. + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand + +*/ + +/** @page ViewToolsConvert Command-line Tools For Converting HDF5 Files +Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand +
+ +\section secViewToolsConvertTOC Contents +
    +
  • \ref secViewToolsConvertASCII
  • +
  • \ref secViewToolsConvertBinary
  • +
  • \ref secViewToolsConvertExport
  • +
+ +\section secViewToolsConvertASCII Output HDF5 Dataset into an ASCII File (to Import into Excel and Other Applications) +The h5dump utility can be used to convert an HDF5 dataset +into an ASCII file, which can then be imported into Excel and other applications. The following options are used: + + + + + + + + + + + + + + + + + + + + +
OptionsDescription
-d D, --dataset=D +Display dataset D +
-o F, --output=F +Output raw data into file F +
-y, --noindex +Suppress printing of array indices with the data +
-w N, --width=N +Set the number of output columns to N. A value of 0 +sets the number to 65535 (the maximum) +
+ +As an example, h5_crtdat.c from the \ref LBDsetCreate +HDF5 Tutorial topic, creates the file dset.h5 with +a dataset /dset that is a 4 x 6 integer array. The +following is displayed when viewing dset.h5 with +h5dump: +\code +$ h5dump dset.h5 +HDF5 "dset.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + DATA { + (0,0): 1, 2, 3, 4, 5, 6, + (1,0): 7, 8, 9, 10, 11, 12, + (2,0): 13, 14, 15, 16, 17, 18, + (3,0): 19, 20, 21, 22, 23, 24 + } + } +} +} +\endcode + +The following command will output the values of the /dset +dataset to the ASCII file dset.asci: +\code +h5dump -d /dset -o dset.asci -y -w 50 dset.h5 +\endcode + +In particular, note that: +\li The default behavior of h5dump is to print indices, +and the -y option suppresses this. +\li The -w 50 option tells +h5dump to allow 50 columns for outputting the data. The +value specified must be large enough to accommodate the dimension size of the dataset multiplied by the +number of positions and spaces needed to print each value. If the value is not large enough, the output +will wrap to the next line, and the data will not display as expected in Excel or other applications. To +ensure that the output does not wrap to the next line, you can also specify 0 (zero) for the +-w option. + +In addition to creating the ASCII file dset.asci, the +above command outputs the metadata of the specified dataset: +\code +HDF5 "dset.h5" { +DATASET "/dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + DATA { + } +} +} +\endcode + +The dset.asci file will contain the values for the dataset: +\code + 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24 +\endcode + +\section secViewToolsConvertBinary Output HDF5 Dataset into Binary File +The h5dump utility can be used to convert an +HDF5 dataset to a binary file with the following options: + + + + + + + + + + + + + + + + +
OptionsDescription
-d D, --dataset=D +Display dataset D +
-o F, --output=F +Output raw data into file F +
-b B, --binary=B +Binary file output of form B. +Valid values are: LE, BE, NATIVE, FILE +
+ +As an example, h5_crtdat.c from the +\ref LBDsetCreate HDF5 Tutorial topic, creates the file dset.h5 with a dataset +/dset that is a 4 x 6 integer array. The +following is displayed when viewing dset.h5 +with h5dump: +\code +$ h5dump -d /dset/ dset.h5 +HDF5 "dset.h5" { +DATASET "/dset/" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + DATA { + (0,0): 1, 2, 3, 4, 5, 6, + (1,0): 7, 8, 9, 10, 11, 12, + (2,0): 13, 14, 15, 16, 17, 18, + (3,0): 19, 20, 21, 22, 23, 24 + } +} +} +\endcode + +As specified by the -d and +-o options, the following +h5dump command will output the values of the dataset +/dset to a file called +dset.bin. The -b +option specifies that the output will be binary in Little Endian format (LE). + +\code +h5dump -d /dset -b LE -o dset.bin dset.h5 +\endcode + +This command outputs the metadata for the dataset, as well as creating the binary file +dset.bin: +\code +HDF5 "dset.h5" { +DATASET "/dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + DATA { + } +} +} +\endcode + +If you look at the resulting dset.bin file with +a binary editor, you will see that it contains the dataset's values. For example (on Linux) you will see: +\code +$ od -t d dset.bin +0000000 1 2 3 4 +0000020 5 6 7 8 +0000040 9 10 11 12 +0000060 13 14 15 16 +0000100 17 18 19 20 +0000120 21 22 23 24 +0000140 +\endcode + +\section secViewToolsConvertExport Export from h5dump and Import into HDF5 +The h5import utility can use the output of +h5dump as input to create a dataset or file. + +The h5dump utility must first create two files: +\li A DDL file, which will be used as an h5import configuration file +\li A raw data file containing the data to be imported + +The DDL file must be generated with the h5dump -p option, to generate properties. 
+ +The raw data file that can be imported into HDF5 using this method may contain either numeric or string data with the following restrictions: +\li Numeric data requires the use of the h5dump -b option to produce a binary data file. +\li String data must be written with the h5dump -y and +--width=1 options, generating a single column of strings without indices. + +Two examples follow: the first imports a dataset with a numeric datatype. Note that numeric data requires +the use of the h5dump -b option to produce a binary data +file. The example program (h5_crtdat.c) that creates this +file is included with the \ref IntroHDF5 tutorial and can be obtained from the \ref LBExamples page: +\code +h5dump -p -d "/dset" --ddl=dsetbin.dmp -o dset.bin -b dset.h5 +h5import dset.bin -c dsetbin.dmp -o new-dset.h5 +\endcode + +The output before and after running these commands is shown below: +\code +$ h5dump dset.h5 +HDF5 "dset.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + DATA { + (0,0): 1, 2, 3, 4, 5, 6, + (1,0): 7, 8, 9, 10, 11, 12, + (2,0): 13, 14, 15, 16, 17, 18, + (3,0): 19, 20, 21, 22, 23, 24 + } + } +} +} +$ h5dump -p -d "/dset" --ddl=dsetbin.dmp -o dset.bin -b dset.h5 + +$ h5import dset.bin -c dsetbin.dmp -o new-dset.h5 + +$ h5dump new-dset.h5 +HDF5 "new-dset.h5" { +GROUP "/" { + DATASET "dset" { + DATATYPE H5T_STD_I32BE + DATASPACE SIMPLE { ( 4, 6 ) / ( 4, 6 ) } + DATA { + (0,0): 1, 2, 3, 4, 5, 6, + (1,0): 7, 8, 9, 10, 11, 12, + (2,0): 13, 14, 15, 16, 17, 18, + (3,0): 19, 20, 21, 22, 23, 24 + } + } +} +} +\endcode + +The second example imports string data. The example program that creates this file can be downloaded +from the Examples by API page. + +Note that string data requires use of the h5dump -y +option to exclude indexes and the h5dump --width=1 +option to generate a single column of strings. The -o +option outputs the data into an ASCII file. 
+\code +h5dump -p -d "/DS1" -O vlstring.dmp -o vlstring.ascii -y --width=1 h5ex_t_vlstring.h5 +h5import vlstring.ascii -c vlstring.dmp -o new-vlstring.h5 +\endcode + +The output before and after running these commands is shown below: +\code +$ h5dump h5ex_t_vlstring.h5 +HDF5 "h5ex_t_vlstring.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_STRING { + STRSIZE H5T_VARIABLE; + STRPAD H5T_STR_SPACEPAD; + CSET H5T_CSET_ASCII; + CTYPE H5T_C_S1; + } + DATASPACE SIMPLE { ( 4 ) / ( 4 ) } + DATA { + (0): "Parting", "is such", "sweet", "sorrow." + } + } +} +} + +$ h5dump -p -d "/DS1" -O vlstring.dmp -o vlstring.ascii -y --width=1 h5ex_t_vlstring.h5 + +$ h5import vlstring.ascii -c vlstring.dmp -o new-vlstring.h5 + +$ h5dump new-vlstring.h5 +HDF5 "new-vlstring.h5" { +GROUP "/" { + DATASET "DS1" { + DATATYPE H5T_STRING { + STRSIZE H5T_VARIABLE; + STRPAD H5T_STR_NULLTERM; + CSET H5T_CSET_ASCII; + CTYPE H5T_C_S1; + } + DATASPACE SIMPLE { ( 4 ) / ( 4 ) } + DATA { + (0): "Parting", "is such", "sweet", "sorrow." + } + } +} +\endcode + +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand + +*/ diff --git a/doxygen/dox/ViewToolsJPSS.dox b/doxygen/dox/ViewToolsJPSS.dox new file mode 100644 index 0000000..9c15395 --- /dev/null +++ b/doxygen/dox/ViewToolsJPSS.dox @@ -0,0 +1,763 @@ +/** @page ViewToolsJPSS Use Case: Examining a JPSS NPP File With HDF5 Tools +Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand +
+ +\section secViewToolsJPSSTOC Contents +
    +
  • \ref secViewToolsJPSSDeter
  • +
  • \ref secViewToolsJPSSView
  • +
  • \ref secViewToolsJPSSExam
  • +
+ +This tutorial illustrates how to use the HDF5 tools to examine NPP files from the JPSS project. The following files are discussed: +\code +SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5 (gzipped file) +SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 (gzipped file) +\endcode + +\section secViewToolsJPSSDeter Determining File Contents +The first thing you may want to do is determine what is in your file. You can use the command-line tools or HDFView to do this: +\li @ref subsecViewToolsJPSSDeter_h5dump +\li @ref subsecViewToolsJPSSDeter_h5ls +\li @ref subsecViewToolsJPSSDeter_HDFView + +JPSS NPP files all contain two root level groups: + + + + + + + + + + + + +
GroupDescription
/All_Data +Contains the raw data and optional geo-location information. +
/Data_Products +Contains a dataset ending in Aggr with +references to objects in the /All_Data group. +Contains granules (datasets with a name ending in Gran_#) +with references to selected regions in datasets under /All_Data. +
+ +\subsection subsecViewToolsJPSSDeter_h5dump h5dump +With h5dump you can see a list of the objects +in the file using the -n option: +\code +h5dump -n +\endcode + +For example: +\code +$ h5dump -n SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5 +HDF5 "SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5" { +FILE_CONTENTS { + group / + group /All_Data + group /All_Data/VIIRS-M9-SDR_All + dataset /All_Data/VIIRS-M9-SDR_All/ModeGran + dataset /All_Data/VIIRS-M9-SDR_All/ModeScan + dataset /All_Data/VIIRS-M9-SDR_All/NumberOfBadChecksums + dataset /All_Data/VIIRS-M9-SDR_All/NumberOfDiscardedPkts + dataset /All_Data/VIIRS-M9-SDR_All/NumberOfMissingPkts + dataset /All_Data/VIIRS-M9-SDR_All/NumberOfScans + dataset /All_Data/VIIRS-M9-SDR_All/PadByte1 + dataset /All_Data/VIIRS-M9-SDR_All/QF1_VIIRSMBANDSDR + dataset /All_Data/VIIRS-M9-SDR_All/QF2_SCAN_SDR + dataset /All_Data/VIIRS-M9-SDR_All/QF3_SCAN_RDR + dataset /All_Data/VIIRS-M9-SDR_All/QF4_SCAN_SDR + dataset /All_Data/VIIRS-M9-SDR_All/QF5_GRAN_BADDETECTOR + dataset /All_Data/VIIRS-M9-SDR_All/Radiance + dataset /All_Data/VIIRS-M9-SDR_All/RadianceFactors + dataset /All_Data/VIIRS-M9-SDR_All/Reflectance + dataset /All_Data/VIIRS-M9-SDR_All/ReflectanceFactors + group /Data_Products + group /Data_Products/VIIRS-M9-SDR + dataset /Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Aggr + dataset /Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_0 + dataset /Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_1 + dataset /Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_2 + dataset /Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_3 + } +} +\endcode + +In the output above you can see that there are four granules (ending in +Gran_#) in the +/Data_Products/VIIRS-M9-SDR/ group. + +\subsection subsecViewToolsJPSSDeter_h5ls h5ls +With h5ls you can see a list of the objects in the +file using the -lr +options. The h5ls utility also shows shape and size +(dataspace) information about datasets. 
+\code +h5ls -lr +\endcode + +For example: +\code +$ h5ls -lr SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5 +/ Group +/All_Data Group +/All_Data/VIIRS-M9-SDR_All Group +/All_Data/VIIRS-M9-SDR_All/ModeGran Dataset {4/Inf} +/All_Data/VIIRS-M9-SDR_All/ModeScan Dataset {192/Inf} +/All_Data/VIIRS-M9-SDR_All/NumberOfBadChecksums Dataset {192/Inf} +/All_Data/VIIRS-M9-SDR_All/NumberOfDiscardedPkts Dataset {192/Inf} +/All_Data/VIIRS-M9-SDR_All/NumberOfMissingPkts Dataset {192/Inf} +/All_Data/VIIRS-M9-SDR_All/NumberOfScans Dataset {4/Inf} +/All_Data/VIIRS-M9-SDR_All/PadByte1 Dataset {12/Inf} +/All_Data/VIIRS-M9-SDR_All/QF1_VIIRSMBANDSDR Dataset {3072/Inf, 3200/Inf} +/All_Data/VIIRS-M9-SDR_All/QF2_SCAN_SDR Dataset {192/Inf} +/All_Data/VIIRS-M9-SDR_All/QF3_SCAN_RDR Dataset {192/Inf} +/All_Data/VIIRS-M9-SDR_All/QF4_SCAN_SDR Dataset {3072/Inf} +/All_Data/VIIRS-M9-SDR_All/QF5_GRAN_BADDETECTOR Dataset {64/Inf} +/All_Data/VIIRS-M9-SDR_All/Radiance Dataset {3072/Inf, 3200/Inf} +/All_Data/VIIRS-M9-SDR_All/RadianceFactors Dataset {8/Inf} +/All_Data/VIIRS-M9-SDR_All/Reflectance Dataset {3072/Inf, 3200/Inf} +/All_Data/VIIRS-M9-SDR_All/ReflectanceFactors Dataset {8/Inf} +/Data_Products Group +/Data_Products/VIIRS-M9-SDR Group +/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Aggr Dataset {16/Inf} +/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_0 Dataset {16/Inf} +/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_1 Dataset {16/Inf} +/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_2 Dataset {16/Inf} +/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_3 Dataset {16/Inf} +\endcode +Note that the Inf indicates that those datasets are appendable or unlimited in size. + +\subsection subsecViewToolsJPSSDeter_HDFView HDFView +If you open the file in HDFView, it will display the file and the root level groups within +it in the TreeView on the left. An HDF5 file is a folder with a "5" in the middle, followed +by the file name. 
There are two folders (groups) within the JPSS file +(All_Data/ and Data_Products/), +which you can select to see their contents:
+\image html hdfview-tree.png +
+ +If you click twice with the left-mouse button on a folder or group in the TreeView, the contents +of the folder will be listed. If you click twice on an object such as a dataset, a window with +the object's values will be displayed. + +Underneath the VIIRS-M1-SDR folder are what HDF5 +calls datasets. The scarlet letter "A" attached +to the group and datasets under Data_Products/ +indicates that there are attributes associated with them. + +\section secViewToolsJPSSView Viewing the User Block +All JPSS files contain a user block in XML with information about the file. The user block is an +optional space allocated at the beginning of an HDF5 file that is not interpreted by the HDF5 +library. Its size is a multiple of 512. + +Since the user block in JPSS files is stored in ASCII and it is stored at the beginning of an +HDF5 file, you could use a text editor or viewer to examine it. However, there are HDF5 utilities +that can help with this: + + + + + + + + + + + + +
UtilityDescription
h5unjam +Extracts a user block from an HDF5 file +
h5dump +The -B (--superblock) option displays the size of the user block in an HDF5 file +
+ +\subsection subsecViewToolsJPSSView_h5unjam h5unjam +The \ref secViewToolsEditAdd tutorial topic discusses the use of the +h5jam and h5unjam +utilities for adding or removing a user block from a file. An input HDF5 file +(-i), output HDF5 file +(-o), and user block text file +(-u) can be specified with these tools. You can use the +h5unjam tool to extract and view the user block in a JPSS file: +\code +h5unjam -i -o -u +\endcode + +For example this command will extract the user block into the file UB.xml: +\code +$ h5unjam -i SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5 + -o svm09-noUB.h5 -u UB.xml +\endcode + +The input HDF5 file remains unchanged. The output HDF5 file will not contain the user block. +The UB.xml file contains the user block +which can be viewed with a browser. + +\subsection subsecViewToolsJPSSView_h5dump h5dump +The h5dump utility has the -B (--superblock) option for displaying the superblock in an HDF5 file. +The superblock contains information about the file such as the file signature, file consistency flags, +the number of bytes to store addresses and size of an object, as well as the size of the user block: +\code +h5dump -B (--superblock) +\endcode + +Below is an example (Unix): +\code +$ h5dump -B -H SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5 | more +HDF5 "SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5" { +SUPER_BLOCK { + SUPERBLOCK_VERSION 0 + FREELIST_VERSION 0 + SYMBOLTABLE_VERSION 0 + OBJECTHEADER_VERSION 0 + OFFSET_SIZE 8 + LENGTH_SIZE 8 + BTREE_RANK 16 + BTREE_LEAF 4 + ISTORE_K 32 + USER_BLOCK { + USERBLOCK_SIZE 1024 + } +} +\endcode + +Once you have the size of the user block, you can extract it from the file using system commands. +For example, on Unix platforms you can use the head command-line tool: +\code +head -c >& USERBLOCK.xml +\endcode + +There are Unix tools for Windows that may work, such as CoreUtils for Windows. 
+ +\section secViewToolsJPSSExam Examining a Granule +
    +
  • @ref subsecViewToolsJPSSExam_h5dump
    +
      +
    • @ref subsubsecViewToolsJPSSExam_h5dumpRegRef
    • +
    • @ref subsubsecViewToolsJPSSExam_h5dumpQuality
    • +
    • @ref subsubsecViewToolsJPSSExam_h5dumpProps
    • +
  • +
  • @ref subsecViewToolsJPSSExamr_HDFView
  • +
+ +\subsection subsecViewToolsJPSSExam_h5dump h5dump +There are several options that you may first want to use when examining a granule with h5dump: + + + + + + + + + + + + + + + + + + + + +
OptionDescription
-H, --header +Prints header (metadata) information only +
-d D, --dataset=D +Specifies the granule dataset +
-A 0, --onlyattr=0 +Suppresses attributes +
-p, --properties +Show properties of datasets +(See Properties) +
+ +You would specify the dataset (-d D) and the +-H options to view the metadata associated with +a specific granule. There are many attributes associated with a granule and +-A 0 can be used to suppress those. + +For example: +\code +h5dump -H -A 0 -d "/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_0" + SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5 +\endcode + +This command displays: +\code + HDF5 "SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5" { + DATASET "/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_0" { + DATATYPE H5T_REFERENCE { H5T_STD_REF_DSETREG } + DATASPACE SIMPLE { ( 16 ) / ( H5S_UNLIMITED ) } + } + } +\endcode + +To see the actual contents of the granule remove the -H option: +\code +h5dump -A 0 -d "/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_0" + SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5 +\endcode + +The above command displays: +\code +HDF5 "SVM09_npp_d20120229_t0849107_e0854511_b01759_c20120229145452682127_noaa_ops.h5" { +DATASET "/Data_Products/VIIRS-M9-SDR/VIIRS-M9-SDR_Gran_0" { + DATATYPE H5T_REFERENCE { H5T_STD_REF_DSETREG } + DATASPACE SIMPLE { ( 16 ) / ( H5S_UNLIMITED ) } + DATA { + DATASET /All_Data/VIIRS-M9-SDR_All/Radiance {(0,0)-(767,3199)}, + DATASET /All_Data/VIIRS-M9-SDR_All/Reflectance {(0,0)-(767,3199)}, + DATASET /All_Data/VIIRS-M9-SDR_All/ModeScan {(0)-(47)}, + DATASET /All_Data/VIIRS-M9-SDR_All/ModeGran {(0)-(0)}, + DATASET /All_Data/VIIRS-M9-SDR_All/PadByte1 {(0)-(2)}, + DATASET /All_Data/VIIRS-M9-SDR_All/NumberOfScans {(0)-(0)}, + DATASET /All_Data/VIIRS-M9-SDR_All/NumberOfMissingPkts {(0)-(47)}, + DATASET /All_Data/VIIRS-M9-SDR_All/NumberOfBadChecksums {(0)-(47)}, + DATASET /All_Data/VIIRS-M9-SDR_All/NumberOfDiscardedPkts {(0)-(47)}, + DATASET /All_Data/VIIRS-M9-SDR_All/QF1_VIIRSMBANDSDR {(0,0)-(767,3199)}, + DATASET /All_Data/VIIRS-M9-SDR_All/QF2_SCAN_SDR {(0)-(47)}, + DATASET /All_Data/VIIRS-M9-SDR_All/QF3_SCAN_RDR 
{(0)-(47)}, + DATASET /All_Data/VIIRS-M9-SDR_All/QF4_SCAN_SDR {(0)-(767)}, + DATASET /All_Data/VIIRS-M9-SDR_All/QF5_GRAN_BADDETECTOR {(0)-(15)}, + DATASET /All_Data/VIIRS-M9-SDR_All/RadianceFactors {(0)-(1)}, + DATASET /All_Data/VIIRS-M9-SDR_All/ReflectanceFactors {(0)-(1)} + } +} +} +\endcode + +As you can see in the output above, the datatype for this dataset is: +\code +DATATYPE H5T_REFERENCE { H5T_STD_REF_DSETREG } +\endcode + +This indicates that it is a dataset specifically for storing references to regions (or subsets) +in other datasets. The dataset contains 16 such references, and more can be added to it, as +indicated by the dataspace (in other words it is unlimited): +\code +DATASPACE SIMPLE { ( 16 ) / ( H5S_UNLIMITED ) } +\endcode + +\subsubsection subsubsecViewToolsJPSSExam_h5dumpRegRef Viewing a Region Reference +What if we wanted to look at the NumberOfScans data for a specific granule in a file? + +First, we may be interested in determining whether the scans were done at night or in the day. If a scan was at night, there will be no data. + +The attribute N_Day_Night_Flag is used to determine when the scan was done. If you don't know where this attribute is located, you can use the -N option to search for it in the file. If you were to run this command on the SVM09 file used above, you would see that the N_Day_Night_Flag attribute has a value of Night for the four granules in the file. Indeed, if you actually examine the NumberOfScans data, you will see that only fill values are written. 
+ +For that reason we will examine the NumberOfScans data for the SVMO1 file below, as it was obtained during the day: +\code +h5dump -N N_Day_Night_Flag SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +\endcode + +It displays: +\code +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +ATTRIBUTE "N_Day_Night_Flag" { + DATATYPE H5T_STRING { + STRSIZE 4; + STRPAD H5T_STR_NULLTERM; + CSET H5T_CSET_ASCII; + CTYPE H5T_C_S1; + } + DATASPACE SIMPLE { ( 1, 1 ) / ( 1, 1 ) } + DATA { + (0,0): "Day" + } +} +} +\endcode + +There is just one granule in this SVM01 file, as shown below: +\code +$ h5dump -n SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +FILE_CONTENTS { + group / + group /All_Data + group /All_Data/VIIRS-M1-SDR_All + dataset /All_Data/VIIRS-M1-SDR_All/ModeGran + dataset /All_Data/VIIRS-M1-SDR_All/ModeScan + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfBadChecksums + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfDiscardedPkts + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfMissingPkts + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfScans + dataset /All_Data/VIIRS-M1-SDR_All/PadByte1 + dataset /All_Data/VIIRS-M1-SDR_All/QF1_VIIRSMBANDSDR + dataset /All_Data/VIIRS-M1-SDR_All/QF2_SCAN_SDR + dataset /All_Data/VIIRS-M1-SDR_All/QF3_SCAN_RDR + dataset /All_Data/VIIRS-M1-SDR_All/QF4_SCAN_SDR + dataset /All_Data/VIIRS-M1-SDR_All/QF5_GRAN_BADDETECTOR + dataset /All_Data/VIIRS-M1-SDR_All/Radiance + dataset /All_Data/VIIRS-M1-SDR_All/RadianceFactors + dataset /All_Data/VIIRS-M1-SDR_All/Reflectance + dataset /All_Data/VIIRS-M1-SDR_All/ReflectanceFactors + group /Data_Products + group /Data_Products/VIIRS-M1-SDR + dataset /Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Aggr + dataset /Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0 + } +} +\endcode + +Now examine the references in the VIIRS-M1-SDR_Gran_0 
granule +\code +$ h5dump -A 0 -d "/Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0" + SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +DATASET "/Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0" { + DATATYPE H5T_REFERENCE { H5T_STD_REF_DSETREG } + DATASPACE SIMPLE { ( 16 ) / ( H5S_UNLIMITED ) } + DATA { + DATASET /All_Data/VIIRS-M1-SDR_All/Radiance {(0,0)-(767,3199)}, + DATASET /All_Data/VIIRS-M1-SDR_All/Reflectance {(0,0)-(767,3199)}, + DATASET /All_Data/VIIRS-M1-SDR_All/ModeScan {(0)-(47)}, + DATASET /All_Data/VIIRS-M1-SDR_All/ModeGran {(0)-(0)}, + DATASET /All_Data/VIIRS-M1-SDR_All/PadByte1 {(0)-(2)}, + DATASET /All_Data/VIIRS-M1-SDR_All/NumberOfScans {(0)-(0)}, + DATASET /All_Data/VIIRS-M1-SDR_All/NumberOfMissingPkts {(0)-(47)}, + DATASET /All_Data/VIIRS-M1-SDR_All/NumberOfBadChecksums {(0)-(47)}, + DATASET /All_Data/VIIRS-M1-SDR_All/NumberOfDiscardedPkts {(0)-(47)}, + DATASET /All_Data/VIIRS-M1-SDR_All/QF1_VIIRSMBANDSDR {(0,0)-(767,3199)}, + DATASET /All_Data/VIIRS-M1-SDR_All/QF2_SCAN_SDR {(0)-(47)}, + DATASET /All_Data/VIIRS-M1-SDR_All/QF3_SCAN_RDR {(0)-(47)}, + DATASET /All_Data/VIIRS-M1-SDR_All/QF4_SCAN_SDR {(0)-(767)}, + DATASET /All_Data/VIIRS-M1-SDR_All/QF5_GRAN_BADDETECTOR {(0)-(15)}, + DATASET /All_Data/VIIRS-M1-SDR_All/RadianceFactors {(0)-(1)}, + DATASET /All_Data/VIIRS-M1-SDR_All/ReflectanceFactors {(0)-(1)} + } +} +} +\endcode + +In the output above, you can see that the NumberOfScans +reference is the sixth reference in the granule counting from the top. + +The list of references shown above is a 0-based index to the dataset. Therefore, to specify +NumberOfScans, enter a start offset of +5 for the -s +option (the sixth reference minus 1). To see the region reference data, use the -R option. 
+ +This command will display the data in the NumberOfScans region reference: +\code +h5dump -A 0 -d "/Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0" -s 5 -R + SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +\endcode + +It displays the number of scans (48): +\code +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +DATASET "/Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0" { + DATATYPE H5T_REFERENCE { H5T_STD_REF_DSETREG } + DATASPACE SIMPLE { ( 16 ) / ( H5S_UNLIMITED ) } + SUBSET { + START ( 5 ); + STRIDE ( 1 ); + COUNT ( 1 ); + BLOCK ( 1 ); + DATA { + (5): DATASET /All_Data/VIIRS-M1-SDR_All/NumberOfScans { + (5): REGION_TYPE BLOCK (0)-(0) + (5): DATATYPE H5T_STD_I32BE + (5): DATASPACE SIMPLE { ( 1 ) / ( H5S_UNLIMITED ) } + (5): DATA { + (0): 48 + (5): } + (5): } + } + } +} +} +\endcode + +The -s option may be familiar as one of the options +that was described in the \ref secViewToolsViewSub tutorial topic. The other subsetting options are not included, +indicating that the default values are used. + +If you leave off the -R option, you will see the subset selection, but not the data: +\code +$ h5dump -A 0 -d "/Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0" -s 5 + SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +DATASET "/Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0" { + DATATYPE H5T_REFERENCE { H5T_STD_REF_DSETREG } + DATASPACE SIMPLE { ( 16 ) / ( H5S_UNLIMITED ) } + SUBSET { + START ( 5 ); + STRIDE ( 1 ); + COUNT ( 1 ); + BLOCK ( 1 ); + DATA { + DATASET /All_Data/VIIRS-M1-SDR_All/NumberOfScans {(0)-(0)} + } + } +} +} +\endcode + +\subsubsection subsubsecViewToolsJPSSExam_h5dumpQuality Viewing a Quality Flag +The quality flags in an NPP file can be viewed with h5dump using the -M +option. Quality flags are packed into each integer value in a quality flag dataset. 
Quality flag datasets in NPP +files begin with the letters QF. + +In the following NPP file, there are five Quality Flag datasets +(/All_Data/VIIRS-M1-SDR_All/QF*): +\code +$ h5dump -n SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +FILE_CONTENTS { + group / + group /All_Data + group /All_Data/VIIRS-M1-SDR_All + dataset /All_Data/VIIRS-M1-SDR_All/ModeGran + dataset /All_Data/VIIRS-M1-SDR_All/ModeScan + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfBadChecksums + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfDiscardedPkts + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfMissingPkts + dataset /All_Data/VIIRS-M1-SDR_All/NumberOfScans + dataset /All_Data/VIIRS-M1-SDR_All/PadByte1 + dataset /All_Data/VIIRS-M1-SDR_All/QF1_VIIRSMBANDSDR + dataset /All_Data/VIIRS-M1-SDR_All/QF2_SCAN_SDR + dataset /All_Data/VIIRS-M1-SDR_All/QF3_SCAN_RDR + dataset /All_Data/VIIRS-M1-SDR_All/QF4_SCAN_SDR + dataset /All_Data/VIIRS-M1-SDR_All/QF5_GRAN_BADDETECTOR + dataset /All_Data/VIIRS-M1-SDR_All/Radiance + dataset /All_Data/VIIRS-M1-SDR_All/RadianceFactors + dataset /All_Data/VIIRS-M1-SDR_All/Reflectance + dataset /All_Data/VIIRS-M1-SDR_All/ReflectanceFactors + group /Data_Products + group /Data_Products/VIIRS-M1-SDR + dataset /Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Aggr + dataset /Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0 + } +} +\endcode + +The flags in this particular dataset happen to be stored in every two bits of each quality flag dataset +element, and the values range from 0 to 2. In other words, to see the quality flag values for this +dataset, these bits would be examined: 0 and 1, 2 and 3, 4 and 5, or 6 and 7 (This information was +obtained from the Product Profile XML File.) 
+ +For example, bits 0 and 1 in the QF1_VIIRSMBANDSDR dataset specify the flag that +"Indicates calibration quality due to bad space view offsets, OBC view offsets, etc or use of a +previous calibration view". It has 3 values: Good (0), Poor (1), or No Calibration (2). + +The -M option is used to specify the quality +flag bit offset (O) and length (L): +\code +h5dump -d DATASET -M O,L FILE +\endcode + +To view the first quality flag (0-1) in a 5 x 6 subset of the QF1_VIIRSMBANDSDR dataset, specify: +\code +h5dump -d "/All_Data/VIIRS-M1-SDR_All/QF1_VIIRSMBANDSDR[0,0;;5,6;]" + -M 0,2 SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +\endcode + +This outputs: +\code +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +DATASET "/All_Data/VIIRS-M1-SDR_All/QF1_VIIRSMBANDSDR" { + DATATYPE H5T_STD_U8BE + DATASPACE SIMPLE { ( 768, 3200 ) / ( H5S_UNLIMITED, H5S_UNLIMITED ) } + PACKED_BITS OFFSET=0 LENGTH=2 + SUBSET { + START ( 0, 0 ); + STRIDE ( 1, 1 ); + COUNT ( 5, 6 ); + BLOCK ( 1, 1 ); + DATA { + (0,0): 2, 2, 2, 2, 2, 2, + (1,0): 2, 2, 2, 2, 2, 2, + (2,0): 0, 0, 0, 0, 0, 0, + (3,0): 0, 0, 0, 0, 0, 0, + (4,0): 0, 0, 0, 0, 0, 0 + } + } +} +} +\endcode + +To view more than one quality flag at a time simply add the bit offset and length values to +-M, separated by commas. For example, this +-M option specifies bits 0-1 and 2-3: +\code +h5dump -d DATASET -M 0,2,2,2 FILE +\endcode + +\subsubsection subsubsecViewToolsJPSSExam_h5dumpProps Properties +To view properties of a specific dataset with h5dump +use the -p option along with the +-d option. Depending on the number of attributes +and the amount of data, the -A 0 and +-H options can also be specified to suppress +printing of attributes and data values: +\code +h5dump -p -H -A 0 -d DATASET +\endcode + +The -p option shows any compression filters +associated with a dataset, as well as layout and fill value information. 
This option can be helpful +in diagnosing performance and other issues. + +As an example, examine the /All_Data/VIIRS-M1-SDR_All/Radiance +dataset in the SVM01 file: +\code +$ h5dump -p -H -A 0 -d "/All_Data/VIIRS-M1-SDR_All/Radiance" + SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +HDF5 "SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5" { +DATASET "/All_Data/VIIRS-M1-SDR_All/Radiance" { + DATATYPE H5T_STD_U16BE + DATASPACE SIMPLE { ( 768, 3200 ) / ( H5S_UNLIMITED, H5S_UNLIMITED ) } + STORAGE_LAYOUT { + CHUNKED ( 768, 3200 ) + SIZE 4915200 + } + FILTERS { + NONE + } + FILLVALUE { + FILL_TIME H5D_FILL_TIME_IFSET + VALUE 65529 + } + ALLOCATION_TIME { + H5D_ALLOC_TIME_INCR + } +} +} +\endcode + +We can see that the chunk size for this dataset is 768 x 3200, and the storage size is 4915200. + +What if the chunk size were smaller? + +The dataset was modified to have a chunk size of 1 x 10, using the +h5repack utility, as shown below. 
+ +\code +$ h5repack -l /All_Data/VIIRS-M1-SDR_All/Radiance:CHUNK=1x10 + SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 SVM01repack.h5 + +$ h5dump -p -H -A 0 -d "/All_Data/VIIRS-M1-SDR_All/Radiance" SVM01repack.h5 +HDF5 "SVM01repack.h5" { +DATASET "/All_Data/VIIRS-M1-SDR_All/Radiance" { + DATATYPE H5T_STD_U16BE + DATASPACE SIMPLE { ( 768, 3200 ) / ( H5S_UNLIMITED, H5S_UNLIMITED ) } + STORAGE_LAYOUT { + CHUNKED ( 1, 10 ) + SIZE 4915200 + } + FILTERS { + NONE + } + FILLVALUE { + FILL_TIME H5D_FILL_TIME_IFSET + VALUE 65529 + } + ALLOCATION_TIME { + H5D_ALLOC_TIME_INCR + } +} +} +\endcode + +In this case, the storage size of the dataset is the same, but the size of the file almost doubled: +\code +$ ls -1sh +total 35M +12M SVM01_npp_d20130524_t1255132_e1256374_b08146_c20130524192048864992_noaa_ops.h5 +23M SVM01repack.h5 +\endcode + +In general, the smaller the chunk size, the more chunks that HDF5 has to keep track of, which increases +the size of the file and can affect performance. + +\subsection subsecViewToolsJPSSExamr_HDFView HDFView +As mentioned previously, the structure of an HDF5 file is displayed in the TreeView on the left side of the HDFView screen, +and you can click on objects and have metadata information displayed on the right side. + +To discover more about the granule /Data_Products/VIIRS-M1-SDR/VIIRS-M1-SDR_Gran_0 +in the SVM01 file shown below in the TreeView, position +the mouse over the granule and click to select. Properties for the object are displayed on the right side of the HDFView screen. +You can see Datatype and Dataspace information on the General Object Info +tab, any Attributes associated with the granule will be on the +Object Attribute Info tab. In the +General Object Info, you can see that the dataset is a +Region Reference dataset, and that there are sixteen Region References in this dataset: + + + + +
+\image html hdfview-prop.png +
+ +To examine the data in the granule, click twice on it with the left mouse button in the TreeView, +and it will open in a new window: + + + + +
+\image html hdfview-regref.png +
+ +If you click twice with the left mouse button on the sixth Region Reference +/All_Data/VIIRS-M1-SDR_All/NumberOfScans a window +will pop up with the value(s) of the reference: + + + + +
+\image html hdfview-regref2.png +
+ +You can also set a user option to automatically show the value(s) in a Region Reference. Under the +Tools pull-down menu, select +User Options and then select +HDF Settings and then select +Show RegRef Values in the +Data section (see the middle of the image below): + + + + +
+\image html hdfview-regrefval.png +
+ +Then you will automatically see the values of the Region Reference when you open it and select an entry: + + + + +
+\image html hdfview-regref1.png +
+ +You can view and set quality flags by clicking the right mouse button over a quality flags dataset under +All_Data and selecting +Open As from the pop-up menu. In the middle of +the window that pops up, you will see where you can specify Bitmask options. + + + + +
+\image html hdfview-qf.png +
+ +
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref ViewToolsCommand + +*/ diff --git a/doxygen/dox/high_level/extension.dox b/doxygen/dox/high_level/extension.dox index c81ac6e..d754b96 100644 --- a/doxygen/dox/high_level/extension.dox +++ b/doxygen/dox/high_level/extension.dox @@ -1,60 +1,51 @@ /** \defgroup H5LR Extensions * - * Working with region references, hyperslab selections, + * Working with region references, hyperslab selections, * and bit-fields (H5LR, H5LT) * - * The following reference manual entries describe high-level HDF5 C and Fortran APIs - * for working with region references, hyperslab selections, and bit-fields. - * These functions were created as part of a project supporting + * The following reference manual entries describe high-level HDF5 C and Fortran APIs + * for working with region references, hyperslab selections, and bit-fields. + * These functions were created as part of a project supporting * NPP/NPOESS Data Production and Exploitation ( * - * project , - * - * software ). - * While they were written to facilitate access to NPP, NPOESS, and JPSS - * data in the HDF5 format, these functions may be useful to anyone working + * project, + * software ). + * While they were written to facilitate access to NPP, NPOESS, and JPSS + * data in the HDF5 format, these functions may be useful to anyone working * with region references, hyperslab selections, or bit-fields. * * Note that these functions are not part of the standard HDF5 distribution; - * the - * - * software + * the + * software * must be separately downloaded and installed. * - * A comprehensive guide to this library, - * + * A comprehensive guide to this library, + * * User Guide to the HDF5 High-level Library for Handling Region References and Hyperslab Selections - * is available at + * is available at * https://support.hdfgroup.org/projects/jpss/documentation/HL/UG/NPOESS_HL-UG.pdf. 
* * - \ref H5LRcopy_reference - * \n Copies data from the specified dataset to a new location and - * creates a reference to it. + * \n Copies data from the specified dataset to a new location and creates a reference to it. * - \ref H5LRcopy_region - * \n Copies data from a referenced region to a region in a - * destination dataset. + * \n Copies data from a referenced region to a region in a destination dataset. * - \ref H5LRcreate_ref_to_all - * \n Creates a dataset with the region references to the data in all - * datasets located under a specified group in a file or creates a - * dataset with object references to all objects (groups or datasets) + * \n Creates a dataset with the region references to the data in all datasets located under a + * specified group in a file or creates a dataset with object references to all objects (groups or datasets) * located under a specified group in a file. * - \ref H5LRcreate_region_references - * \n Creates an array of region references using an array of paths to + * \n Creates an array of region references using an array of paths to * datasets and an array of corresponding hyperslab descriptions. * - \ref H5LRget_region_info * \n Retrieves information about the data a region reference points to. * - \ref H5LRmake_dataset - * \n Creates and writes a dataset containing a list of - * region references. + * \n Creates and writes a dataset containing a list of region references. * - \ref H5LRread_region - * \n Retrieves raw data pointed to by a region reference to - * an application buffer. + * \n Retrieves raw data pointed to by a region reference to an application buffer. * - \ref H5LTcopy_region - * \n Copies data from a specified region in a source dataset - * to a specified region in a destination dataset. + * \n Copies data from a specified region in a source dataset to a specified region in a destination dataset. 
* - \ref H5LTread_bitfield_value - * \n Retrieves the values of quality flags for each element - * to the application provided buffer. + * \n Retrieves the values of quality flags for each element to the application provided buffer. * - \ref H5LTread_region * \n Reads selected data to an application buffer. * @@ -77,24 +68,24 @@ * \param[in] path Path to the dataset being created * \param[in] type_id Datatype of the dataset * \param[in] buf_size Size of the \p loc_id_ref and \p buf arrays - * \param[in] loc_id_ref Array of object identifiers; each identifier - * describes to which HDF5 file the corresponding + * \param[in] loc_id_ref Array of object identifiers; each identifier + * describes to which HDF5 file the corresponding * region reference belongs to * \param[in] buf Array of region references * * \return \herr_t * - * \details Given an array of size \p buf_size of region references \p buf, - * the function will create a dataset with path \p path, at location - * specified by \p loc_id and of a datatype specified by \p type_id, - * and will write data associated with each region reference in the order - * corresponding to the order of the region references in the buffer. - * It is assumed that all referenced hyperslabs have the same dimensionality, - * and only the size of the slowest changing dimension may differ. - * Each reference in the \p buf array belongs to the file identified + * \details Given an array of size \p buf_size of region references \p buf, + * the function will create a dataset with path \p path, at location + * specified by \p loc_id and of a datatype specified by \p type_id, + * and will write data associated with each region reference in the order + * corresponding to the order of the region references in the buffer. + * It is assumed that all referenced hyperslabs have the same dimensionality, + * and only the size of the slowest changing dimension may differ. 
+ * Each reference in the \p buf array belongs to the file identified * by the corresponding object identifiers in the array \p loc_id_ref. * - * If \p path does not exist in \p loc_id then the function will + * If \p path does not exist in \p loc_id then the function will * create the path specified by \p path automatically. * * \version 1.1 Fortran wrapper introduced in this release. @@ -103,10 +94,10 @@ * */ H5_HLRDLL herr_t H5LRmake_dataset(hid_t loc_id, - const char *path, - hid_t type_id, const size_t buf_size, - const hid_t *loc_id_ref, - const hdset_reg_ref_t *buf); + const char *path, + hid_t type_id, const size_t buf_size, + const hid_t *loc_id_ref, + const hdset_reg_ref_t *buf); /*------------------------------------------------------------------------- * @@ -119,49 +110,46 @@ H5_HLRDLL herr_t H5LRmake_dataset(hid_t loc_id, * -------------------------------------------------------------------------- * \ingroup H5LR * - * \brief Creates an array of region references using an array of paths to + * \brief Creates an array of region references using an array of paths to * datasets and an array of corresponding hyperslab descriptions. 
* * \param[in] obj_id File identifier for the HDF5 file containing * the referenced regions or an object identifier * for any object in that file - * \param[in] num_elem Number of elements in the \p path and - * \p buf arrays - * \param[in] path Array of pointers to strings, which contain - * the paths to the target datasets for the - * region references + * \param[in] num_elem Number of elements in the \p path and \p buf arrays + * \param[in] path Array of pointers to strings, which contain + * the paths to the target datasets for the region references * \param[in] block_coord Array of hyperslab coordinate - * \param[out] buf Buffer for returning an array of region - * references + * \param[out] buf Buffer for returning an array of region references * * \return \herr_t * * \note **Motivation:** - * \note H5LRcreate_region_references() is useful when creating + * \note H5LRcreate_region_references() is useful when creating * large numbers of similar region references. * - * \details H5LRcreate_region_references() creates a list of region references - * given an array of paths to datasets and another array listing the + * \details H5LRcreate_region_references() creates a list of region references + * given an array of paths to datasets and another array listing the * corner coordinates of the corresponding hyperslabs. * * \p path parameter is an array of pointers to strings. * - * \p num_elem specifies the number of region references to be created, + * \p num_elem specifies the number of region references to be created, * thus specifying the size of the \p path and \p _buf arrays. * - * Buffer \p block_coord has size 2*rank and is the coordinates of the - * starting point following by the coordinates of the ending point of - * the hyperslab, repeated \p num_elem times for each hyperslab. 
- * For example, creating two region references to two hyperslabs, - * one with a rectangular hyperslab region starting at element (2,2) - * to element (5,4) and the second rectangular region starting at - * element (7,7) to element (9,10), results in \p block_coord + * Buffer \p block_coord has size 2*rank and is the coordinates of the + * starting point following by the coordinates of the ending point of + * the hyperslab, repeated \p num_elem times for each hyperslab. + * For example, creating two region references to two hyperslabs, + * one with a rectangular hyperslab region starting at element (2,2) + * to element (5,4) and the second rectangular region starting at + * element (7,7) to element (9,10), results in \p block_coord * being {2,2,5,4, 7,7,9,10}. * - * The rank of the hyperslab will be the same as the rank of the - * target dataset. H5LRcreate_region_references() will retrieve - * the rank for each dataset and will use those values to interpret - * the values in the buffer. Please note that rank may vary from one + * The rank of the hyperslab will be the same as the rank of the + * target dataset. H5LRcreate_region_references() will retrieve + * the rank for each dataset and will use those values to interpret + * the values in the buffer. Please note that rank may vary from one * dataset to another. * * \version 1.1 Fortran wrapper introduced in this release. @@ -170,43 +158,39 @@ H5_HLRDLL herr_t H5LRmake_dataset(hid_t loc_id, * */ H5_HLRDLL herr_t H5LRcreate_region_references(hid_t obj_id, - size_t num_elem, - const char **path, - const hsize_t *block_coord, - hdset_reg_ref_t *buf); + size_t num_elem, + const char **path, + const hsize_t *block_coord, + hdset_reg_ref_t *buf); /** * -------------------------------------------------------------------------- * \ingroup H5LR * - * \brief Copies data from the specified dataset to a new location and - * creates a reference to it. 
+ * \brief Copies data from the specified dataset to a new location and creates a reference to it. * - * \param[in] obj_id Identifier of any object in a file an - * HDF5 reference belongs to + * \param[in] obj_id Identifier of any object in a file an HDF5 reference belongs to * \param[in] ref Reference to the datasets region - * \param[in] file Name of the destination file + * \param[in] file Name of the destination file * \param[in] path Full path to the destination dataset - * \param[in] block_coord Hyperslab coordinates in the destination - * dataset - * \param[out] ref_new Region reference to the new location of - * data + * \param[in] block_coord Hyperslab coordinates in the destination dataset + * \param[out] ref_new Region reference to the new location of data * * \return \herr_t * - * \details Given a data set pointed to by a region reference, the function - * H5LRcopy_reference() will copy the hyperslab data referenced by - * a datasets region reference into existing dataset specified by - * its path \p path in the file with the name \p file, and to location - * specified by the hyperslab coordinates \p block_coord. It will - * create the region reference \p ref_new to point to the new location. - * The number of elements in the old and newly specified regions has + * \details Given a data set pointed to by a region reference, the function + * H5LRcopy_reference() will copy the hyperslab data referenced by + * a datasets region reference into existing dataset specified by + * its path \p path in the file with the name \p file, and to location + * specified by the hyperslab coordinates \p block_coord. It will + * create the region reference \p ref_new to point to the new location. + * The number of elements in the old and newly specified regions has * to be the same. * - * Buffer \p block_coord has size 2*rank and is the coordinates of - * the starting point following by the coordinates of the ending - * point of the hyperslab. 
For example, to extract a rectangular - * hyperslab region starting at element (2,2) to element (5,4) + * Buffer \p block_coord has size 2*rank and is the coordinates of + * the starting point following by the coordinates of the ending + * point of the hyperslab. For example, to extract a rectangular + * hyperslab region starting at element (2,2) to element (5,4) * then \p block_coord would be {2, 2, 5, 4}. * * \version 1.1 Fortran wrapper introduced in this release. @@ -215,41 +199,39 @@ H5_HLRDLL herr_t H5LRcreate_region_references(hid_t obj_id, * */ H5_HLRDLL herr_t H5LRcopy_reference(hid_t obj_id, hdset_reg_ref_t *ref, const char *file, - const char *path, const hsize_t *block_coord, - hdset_reg_ref_t *ref_new); + const char *path, const hsize_t *block_coord, + hdset_reg_ref_t *ref_new); /** * -------------------------------------------------------------------------- * \ingroup H5LR * - * \brief Copies data from a referenced region to a region in a - * destination dataset. + * \brief Copies data from a referenced region to a region in a destination dataset. 
* - * \param[in] obj_id Identifier of any object in a file + * \param[in] obj_id Identifier of any object in a file * dataset region reference belongs to * \param[in] ref Dataset region reference - * \param[in] file Name of the destination file + * \param[in] file Name of the destination file * \param[in] path Full path to the destination dataset - * \param[in] block_coord Hyperslab coordinates in the destination - * dataset + * \param[in] block_coord Hyperslab coordinates in the destination dataset * * \return \herr_t * - * \details Given a dataset region reference \p ref in a source file - * specified by an identifier of any object in that file - * \p obj_id, the function will write data to the existing - * dataset \p path in file \p file to the simple hyperslab + * \details Given a dataset region reference \p ref in a source file + * specified by an identifier of any object in that file + * \p obj_id, the function will write data to the existing + * dataset \p path in file \p file to the simple hyperslab * specified by \p block_coord. * - * Buffer \p block_coord has size 2*rank and is the coordinates - * of the starting point following by the coordinates of the - * ending point of the hyperslab. For example, to specify a - * rectangular hyperslab destination region starting at element + * Buffer \p block_coord has size 2*rank and is the coordinates + * of the starting point following by the coordinates of the + * ending point of the hyperslab. For example, to specify a + * rectangular hyperslab destination region starting at element * (2,2) to element (5,4) then \p block_coord would be {2, 2, 5, 4}. 
* - * If \p path does not exist in the destination file (as may be - * the case when writing to a new file) then the dataset will be - * copied directly to the \p path and \p block_coord will be + * If \p path does not exist in the destination file (as may be + * the case when writing to a new file) then the dataset will be + * copied directly to the \p path and \p block_coord will be * disregarded. * * \version 1.1 Fortran wrapper introduced in this release. @@ -258,71 +240,66 @@ H5_HLRDLL herr_t H5LRcopy_reference(hid_t obj_id, hdset_reg_ref_t *ref, const ch * */ H5_HLRDLL herr_t H5LRcopy_region(hid_t obj_id, - hdset_reg_ref_t *ref, - const char *file, - const char *path, - const hsize_t *block_coord); + hdset_reg_ref_t *ref, + const char *file, + const char *path, + const hsize_t *block_coord); /** * -------------------------------------------------------------------------- * \ingroup H5LR * - * \brief Creates a dataset with the region references to the data - * in all datasets located under a specified group in a file - * or creates a dataset with object references to all objects + * \brief Creates a dataset with the region references to the data + * in all datasets located under a specified group in a file + * or creates a dataset with object references to all objects * (groups or datasets) located under a specified group in a file. 
* * \fg_loc_id - * \param[in] group_path Absolute or relative path to the group - * at which traversal starts - * \param[in] ds_path Absolute or relative path to the dataset - * with region references to be created - * \param[in] index_type Index_type; - * see valid values below in description - * \param[in] order Order in which index is traversed; - * see valid values below in description - * \param[in] ref_type Reference type; - * see valid values below in description + * \param[in] group_path Absolute or relative path to the group at which traversal starts + * \param[in] ds_path Absolute or relative path to the dataset with region references to be created + * \param[in] index_type Index_type; see valid values below in description + * \param[in] order Order in which index is traversed; see valid values below in description + * \param[in] ref_type Reference type; see valid values below in description * * \return \herr_t * - * \details H5LRcreate_ref_to_all() creates a dataset with the - * region references to the data in all datasets located - * under a specified group in a file or creates a dataset with - * object references to all objects (groups or datasets) located + * \details H5LRcreate_ref_to_all() creates a dataset with the + * region references to the data in all datasets located + * under a specified group in a file or creates a dataset with + * object references to all objects (groups or datasets) located * under a specified group in a file. * - * Given a dataset path \p ds_path in a file specified by the - * \p loc_id identifier, the function H5LRcreate_ref_to_all() - * will create a contiguous one-dimensional dataset with the - * region references or object references depending on the value - * of the \p ref_type parameter. 
When \p ref_type is - * #H5R_DATASET_REGION, each region reference points to all data - * in a dataset encountered by an internally called H5Lvisit() - * routine, which starts at the group specified by the \p loc_id + * Given a dataset path \p ds_path in a file specified by the + * \p loc_id identifier, the function H5LRcreate_ref_to_all() + * will create a contiguous one-dimensional dataset with the + * region references or object references depending on the value + * of the \p ref_type parameter. When \p ref_type is + * #H5R_DATASET_REGION, each region reference points to all data + * in a dataset encountered by an internally called H5Lvisit() + * routine, which starts at the group specified by the \p loc_id * and \p group_path parameters. In a like manner, when - * \p ref_type is #H5R_OBJECT, each object reference points to + * \p ref_type is #H5R_OBJECT, each object reference points to * an object (a group or a dataset) encountered by H5Lvisit(). * - * If \p ds_path does not exist in \p loc_id then the function + * If \p ds_path does not exist in \p loc_id then the function * will create the path specified by \p ds_path automatically. * - * \p index_type specifies the index to be used. + * \p index_type specifies the index to be used. * Valid values include the following: * - #H5_INDEX_NAME Alphanumeric index on name * - #H5_INDEX_CRT_ORDER Index on creation order * - * \p order specifies the order in which objects are to be - * inspected along the index specified in \p index_type. + * \p order specifies the order in which objects are to be + * inspected along the index specified in \p index_type. * Valid values include the following: * - #H5_ITER_INC Increasing order * - #H5_ITER_DEC Decreasing order * - #H5_ITER_NATIVE Fastest available order * - * For more detailed information on these two parameters, - * see H5Lvisit(). + * For more detailed information on these two parameters, + * @see H5Lvisit(). 
* - * \p ref_type specifies the type of the reference to be used. + * \p ref_type specifies the type of the reference to be used. * Valid values include the following: * - #H5R_DATASET_REGION Dataset region reference * - #H5R_OBJECT Object reference @@ -333,7 +310,7 @@ H5_HLRDLL herr_t H5LRcopy_region(hid_t obj_id, * */ H5_HLRDLL herr_t H5LRcreate_ref_to_all(hid_t loc_id, const char *group_path, - const char *ds_path, H5_index_t index_type, H5_iter_order_t order, H5R_type_t ref_type); + const char *ds_path, H5_index_t index_type, H5_iter_order_t order, H5R_type_t ref_type); /*------------------------------------------------------------------------- * @@ -352,30 +329,27 @@ H5_HLRDLL herr_t H5LRcreate_ref_to_all(hid_t loc_id, const char *group_path, * \param[in] obj_id File identifier for the HDF5 file containing * the dataset with the referenced region or an * object identifier for any object in that file - * \param[in] ref Region reference specifying data to be read - * in - * \param[in] mem_type Memory datatype of data read from referenced + * \param[in] ref Region reference specifying data to be read in + * \param[in] mem_type Memory datatype of data read from referenced * region into the application buffer - * \param[in,out] numelem Number of elements to be read into buffer - * \p buf - * \param[out] buf Buffer in which data is returned to the - * application + * \param[in,out] numelem Number of elements to be read into buffer \p buf + * \param[out] buf Buffer in which data is returned to the application * * \return \herr_t * - * \details H5LRread_region() reads data pointed to by the region + * \details H5LRread_region() reads data pointed to by the region * reference \p ref into the buffer \p buf. * - * \p numelem specifies the number of elements to be read - * into \p buf. 
When the size of the reference region is unknown, - * H5LRread_region() can be called with \p buf set to NULL; - * the number of elements in the referenced region will be returned + * \p numelem specifies the number of elements to be read + * into \p buf. When the size of the reference region is unknown, + * H5LRread_region() can be called with \p buf set to NULL; + * the number of elements in the referenced region will be returned * in \p numelem. * - * The buffer buf must be big enough to hold \p numelem elements - * of type \p mem_type. For example, if data is read from the referenced - * region into an integer buffer, \p mem_type should be #H5T_NATIVE_INT - * and the buffer must be at least \c sizeof(int) * \p numelem bytes + * The buffer \p buf must be big enough to hold \p numelem elements + * of type \p mem_type. For example, if data is read from the referenced + * region into an integer buffer, \p mem_type should be #H5T_NATIVE_INT + * and the buffer must be at least \c sizeof(int) * \p numelem bytes * in size. This buffer must be allocated by the application. * * \version 1.1 Fortran wrapper introduced in this release. @@ -384,10 +358,10 @@ H5_HLRDLL herr_t H5LRcreate_ref_to_all(hid_t loc_id, const char *group_path, * */ H5_HLRDLL herr_t H5LRread_region(hid_t obj_id, - const hdset_reg_ref_t *ref, - hid_t mem_type, - size_t *numelem, - void *buf ); + const hdset_reg_ref_t *ref, + hid_t mem_type, + size_t *numelem, + void *buf ); /*------------------------------------------------------------------------- * @@ -400,40 +374,33 @@ H5_HLRDLL herr_t H5LRread_region(hid_t obj_id, * - * \brief Retrieves information about the data a region reference - * points to. + * \brief Retrieves information about the data a region reference points to. * - * \param[in] obj_id Identifier of any object in an HDF5 file - * the region reference belongs to. 
+ * \param[in] obj_id Identifier of any object in an HDF5 file the region reference belongs to. * \param[in] ref Region reference to query - * \param[in,out] len Size of the buffer to store \p path in. - * NOTE: if \p *path is not NULL then \p *len - * must be the appropriate length + * \param[in,out] len Size of the buffer to store \p path in. + * NOTE: if \p *path is not NULL then \p *len must be the appropriate length * \param[out] path Full path that a region reference points to * \param[out] rank The number of dimensions of the dataset - * dimensions of the dataset pointed by - * region reference. - * \param[out] dtype Datatype of the dataset pointed by the - * region reference. + * dimensions of the dataset pointed by region reference. + * \param[out] dtype Datatype of the dataset pointed by the region reference. * \param[out] sel_type Type of the selection (point or hyperslab) - * \param[in,out] numelem Number of coordinate blocks or - * selected elements. - * \param[out] buf Buffer containing description of the region - * pointed by region reference + * \param[in,out] numelem Number of coordinate blocks or selected elements. + * \param[out] buf Buffer containing description of the region pointed by region reference * * \return \herr_t * - * \details H5LRget_region_info() queries information about the data - * pointed by a region reference \p ref. It returns one of the - * absolute paths to a dataset, length of the path, dataset’s rank - * and datatype, description of the referenced region and type of - * the referenced region. Any output argument can be NULL if that + * \details H5LRget_region_info() queries information about the data + * pointed by a region reference \p ref. It returns one of the + * absolute paths to a dataset, length of the path, dataset’s rank + * and datatype, description of the referenced region and type of + * the referenced region. Any output argument can be NULL if that * argument does not need to be returned. 
* - * The parameter \p obj_id is an identifier for any object in the - * HDF5 file containing the referenced object. For example, it can - * be an identifier of a dataset the region reference belongs to - * or an identifier of an HDF5 file the dataset with region references + * The parameter \p obj_id is an identifier for any object in the + * HDF5 file containing the referenced object. For example, it can + * be an identifier of a dataset the region reference belongs to + * or an identifier of an HDF5 file the dataset with region references * is stored in. * * The parameter \p ref is a region reference to query. @@ -442,36 +409,36 @@ H5_HLRDLL herr_t H5LRread_region(hid_t obj_id, * buffer of size \p len+1 to return an absolute path to a dataset * the region reference points to. * - * The parameter \p len is a length of absolute path string plus - * the \0 string terminator. If path parameter is NULL, actual - * length of the path (+1 for \0 string terminator) is returned to - * application and can be used to allocate buffer path of an + * The parameter \p len is a length of absolute path string plus + * the \0 string terminator. If path parameter is NULL, actual + * length of the path (+1 for \0 string terminator) is returned to + * application and can be used to allocate buffer path of an * appropriate length \p len. * * The parameter \p sel_type describes the type of the selected - * region. Possible values can be #H5S_SEL_POINTS for point + * region. Possible values can be #H5S_SEL_POINTS for point * selection and #H5S_SEL_HYPERSLABS for hyperslab selection. * - * The parameter \p numelem describes how many elements will be - * placed in the buffer \p buf. The number should be interpreted + * The parameter \p numelem describes how many elements will be + * placed in the buffer \p buf. The number should be interpreted * using the value of \p sel_type. 
* - * If value of \p sel_type is #H5S_SEL_HYPERSLABS, the parameter - * \p buf contains \p numelem blocks of the coordinates for each - * simple hyperslab of the referenced region. Each block has - * length \c 2*\p rank and is organized as follows: <"start" coordinate>, - * immediately followed by <"opposite" corner coordinate>. - * The total size of the buffer to hold the description of the - * region will be \c 2*\p rank*\p numelem. If region reference - * points to a contiguous sub-array, then the value of \p numelem - * is 1 and the block contains coordinates of the upper left and + * If value of \p sel_type is #H5S_SEL_HYPERSLABS, the parameter + * \p buf contains \p numelem blocks of the coordinates for each + * simple hyperslab of the referenced region. Each block has + * length \c 2*\p rank and is organized as follows: <"start" coordinate>, + * immediately followed by <"opposite" corner coordinate>. + * The total size of the buffer to hold the description of the + * region will be \c 2*\p rank*\p numelem. If region reference + * points to a contiguous sub-array, then the value of \p numelem + * is 1 and the block contains coordinates of the upper left and * lower right corners of the sub-array (or simple hyperslab). * - * If value of \p sel_type is #H5S_SEL_POINTS, the parameter \p buf - * contains \p numelem blocks of the coordinates for each selected - * point of the referenced region. Each block has length \p rank - * and contains coordinates of the element. The total size of the - * buffer to hold the description of the region will be + * If value of \p sel_type is #H5S_SEL_POINTS, the parameter \p buf + * contains \p numelem blocks of the coordinates for each selected + * point of the referenced region. Each block has length \p rank + * and contains coordinates of the element. The total size of the + * buffer to hold the description of the region will be * \p rank* \p numelem. 
* * @@ -481,14 +448,14 @@ H5_HLRDLL herr_t H5LRread_region(hid_t obj_id, * */ H5_HLRDLL herr_t H5LRget_region_info(hid_t obj_id, - const hdset_reg_ref_t *ref, - size_t *len, - char *path, - int *rank, - hid_t *dtype, - H5S_sel_type *sel_type, - size_t *numelem, - hsize_t *buf ); + const hdset_reg_ref_t *ref, + size_t *len, + char *path, + int *rank, + hid_t *dtype, + H5S_sel_type *sel_type, + size_t *numelem, + hsize_t *buf ); @@ -503,35 +470,33 @@ H5_HLRDLL herr_t H5LRget_region_info(hid_t obj_id, * -------------------------------------------------------------------------- * \ingroup H5LR * - * \brief Copies data from a specified region in a source dataset + * \brief Copies data from a specified region in a source dataset * to a specified region in a destination dataset * * \param[in] file_src Name of the source file * \param[in] path_src Full path to the source dataset - * \param[in] block_coord_src Hyperslab coordinates in the - * source dataset + * \param[in] block_coord_src Hyperslab coordinates in the source dataset * \param[in] file_dest Name of the destination file * \param[in] path_dest Full path to the destination dataset - * \param[in] block_coord_dset Hyperslab coordinates in the - * destination dataset + * \param[in] block_coord_dset Hyperslab coordinates in the destination dataset * * \return \herr_t * - * \details Given a path to a dataset \p path_src in a file with the - * name \p file_src, and description of a simple hyperslab of - * the source \p block_coord_src, the function will write data - * to the dataset \p path_dest in file \p file_dest to the - * simple hyperslab specified by \p block_coord_dset. - * The arrays \p block_coord_src and \p block_coord_dset have - * a length of 2*rank and are the coordinates of the starting - * point following by the coordinates of the ending point of the - * hyperslab. 
For example, to specify a rectangular hyperslab - * destination region starting at element (2,2) to element (5,4) + * \details Given a path to a dataset \p path_src in a file with the + * name \p file_src, and description of a simple hyperslab of + * the source \p block_coord_src, the function will write data + * to the dataset \p path_dest in file \p file_dest to the + * simple hyperslab specified by \p block_coord_dset. + * The arrays \p block_coord_src and \p block_coord_dset have + * a length of 2*rank and are the coordinates of the starting + * point followed by the coordinates of the ending point of the + * hyperslab. For example, to specify a rectangular hyperslab + * destination region starting at element (2,2) to element (5,4) + * then \p block_coord_dset would be {2, 2, 5, 4}. * - * If \p path_dest does not exist in the destination file - * (as may be the case when writing to a new file) then the - * dataset will be copied directly to the \p path_dest and + * If \p path_dest does not exist in the destination file + * (as may be the case when writing to a new file) then the + * dataset will be copied directly to the \p path_dest and * \p block_coord_dset will be disregarded. * * \version 1.1 Fortran wrapper introduced in this release. 
@@ -540,11 +505,11 @@ H5_HLRDLL herr_t H5LRget_region_info(hid_t obj_id, * */ H5_HLRDLL herr_t H5LTcopy_region(const char *file_src, - const char *path_src, - const hsize_t *block_coord_src, - const char *file_dest, - const char *path_dest, - const hsize_t *block_coord_dset); + const char *path_src, + const hsize_t *block_coord_src, + const char *file_dest, + const char *path_dest, + const hsize_t *block_coord_dset); /*------------------------------------------------------------------------- * @@ -562,27 +527,25 @@ H5_HLRDLL herr_t H5LTcopy_region(const char *file_src, * \param[in] file Name of file * \param[in] path Full path to a dataset * \param[in] block_coord Hyperslab coordinates - * \param[in] mem_type Memory datatype, describing the buffer - * the referenced data will be read into - * \param[out] buf Buffer containing data from the - * referenced region + * \param[in] mem_type Memory datatype, describing the buffer the referenced data will be read into + * \param[out] buf Buffer containing data from the referenced region * * \return \herr_t * - * \details H5LTread_region() reads data from a region described by - * the hyperslab coordinates in \p block_coord, located in - * the dataset specified by its absolute path \p path in a - * file specified by its name \p file. Data is read into a - * buffer \p buf of the datatype that corresponds to the + * \details H5LTread_region() reads data from a region described by + * the hyperslab coordinates in \p block_coord, located in + * the dataset specified by its absolute path \p path in a + * file specified by its name \p file. Data is read into a + * buffer \p buf of the datatype that corresponds to the * HDF5 datatype specified by \p mem_type. * - * Buffer \p block_coord has size 2*rank and is the coordinates - * of the starting point following by the coordinates of the - * ending point of the hyperslab. 
For example, to extract a - * rectangular hyperslab region starting at element (2,2) to + * Buffer \p block_coord has size 2*rank and is the coordinates + * of the starting point followed by the coordinates of the + * ending point of the hyperslab. For example, to extract a + * rectangular hyperslab region starting at element (2,2) to * element (5,4) then \p block_coord would be {2, 2, 5, 4}. * - * Buffer \p buf should be big enough to hold selected elements + * Buffer \p buf should be big enough to hold selected elements * of the type that corresponds to the \p mem_type * * \version 1.1 Fortran wrapper introduced in this release. @@ -591,57 +554,55 @@ H5_HLRDLL herr_t H5LTcopy_region(const char *file_src, * */ H5_HLRDLL herr_t H5LTread_region(const char *file, - const char *path, - const hsize_t *block_coord, - hid_t mem_type, - void *buf ); + const char *path, + const hsize_t *block_coord, + hid_t mem_type, + void *buf ); /** * -------------------------------------------------------------------------- * \ingroup H5LR * - * \brief Retrieves the values of quality flags for each element + * \brief Retrieves the values of quality flags for each element * to the application provided buffer. 
* * \param[in] dset_id Identifier of the dataset with bit-field values * \param[in] num_values Number of the values to be extracted - * \param[in] offset Array of staring bits to be extracted from + * \param[in] offset Array of starting bits to be extracted from * the element; valid values: 0 (zero) through 7 - * \param[in] lengths Array of the number of bits to be extracted - * for each value - * \param[in] space Dataspace identifier, describing the elements - * to be read from the dataset with bit-field - * values + * \param[in] lengths Array of the number of bits to be extracted for each value + * \param[in] space Dataspace identifier, describing the elements + * to be read from the dataset with bit-field values * \param[out] buf Buffer to read the values in * * \return \herr_t * - * \details H5LTread_bitfield_value() reads selected elements from a - * dataset specified by its identifier \p dset_id, and unpacks + * \details H5LTread_bitfield_value() reads selected elements from a + * dataset specified by its identifier \p dset_id, and unpacks * the bit-field values to a buffer \p buf. * - * The parameter \p space is a space identifier that indicates + * The parameter \p space is a space identifier that indicates * which elements of the dataset should be read. * - * The parameter \p offset is an array of length \p num_values; + * The parameter \p offset is an array of length \p num_values; * the ith element of the array holds the value of the - * starting bit of the ith bit-field value. + * starting bit of the ith bit-field value. * Valid values are: 0 (zero) through 7. * - * The parameter \p lengths is an array of length \p num_values; - * the ith element of the array holds the number of - * bits to be extracted for the ith bit-field value. - * Extracted bits will be interpreted as a base-2 integer value. - * Each value will be converted to the base-10 integer value and - * stored in the application buffer. 
- * - * Buffer \p buf is allocated by the application and should be big - * enough to hold \c num_sel_elem * \p num_values elements of the - * specified type, where \c num_sel_elem is a number of the elements - * to be read from the dataset. Data in the buffer is organized - * as \p num_values values for the first element, followed by the - * \p num_values values for the second element, ... , followed by - * the \p num_values values for the + * The parameter \p lengths is an array of length \p num_values; + * the ith element of the array holds the number of + * bits to be extracted for the ith bit-field value. + * Extracted bits will be interpreted as a base-2 integer value. + * Each value will be converted to the base-10 integer value and + * stored in the application buffer. + * + * Buffer \p buf is allocated by the application and should be big + * enough to hold \c num_sel_elem * \p num_values elements of the + * specified type, where \c num_sel_elem is a number of the elements + * to be read from the dataset. Data in the buffer is organized + * as \p num_values values for the first element, followed by the + * \p num_values values for the second element, ... , followed by + * the \p num_values values for the * \c num_selected_elemth element. * * \version 1.1 Fortran wrapper introduced in this release. @@ -650,5 +611,5 @@ H5_HLRDLL herr_t H5LTread_region(const char *file, * */ H5_HLRDLL herr_t H5LTread_bitfield_value(hid_t dset_id, int num_values, const unsigned *offset, - const unsigned *lengths, hid_t space, int *buf); + const unsigned *lengths, hid_t space, int *buf); diff --git a/doxygen/dox/high_level/high_level.dox b/doxygen/dox/high_level/high_level.dox deleted file mode 100644 index c53d298..0000000 --- a/doxygen/dox/high_level/high_level.dox +++ /dev/null @@ -1,29 +0,0 @@ -/** \page high_level High-level library - * The high-level HDF5 library includes several sets of convenience and standard-use APIs to - * facilitate common HDF5 operations. - * - *
    - *
  • \ref H5LT "Lite (H5LT, H5LD)" - * \n - * Functions to simplify creating and manipulating datasets, attributes and other features - *
  • \ref H5IM "Image (H5IM)" - * \n - * Creating and manipulating HDF5 datasets intended to be interpreted as images - *
  • \ref H5TB "Table (H5TB)" - * \n - * Creating and manipulating HDF5 datasets intended to be interpreted as tables - *
  • \ref H5PT "Packet Table (H5PT)" - * \n - * Creating and manipulating HDF5 datasets to support append- and read-only operations on table data - *
  • \ref H5DS "Dimension Scale (H5DS)" - * \n - * Creating and manipulating HDF5 datasets that are associated with the dimension of another HDF5 dataset - *
  • \ref H5DO "Optimizations (H5DO)" - * \n - * Bypassing default HDF5 behavior in order to optimize for specific use cases - *
  • \ref H5LR "Extensions (H5LR, H5LT)" - * \n - * Working with region references, hyperslab selections, and bit-fields - *
- * - */ diff --git a/doxygen/dox/rm-template.dox b/doxygen/dox/rm-template.dox index bd81f64..1e9f2d7 100644 --- a/doxygen/dox/rm-template.dox +++ b/doxygen/dox/rm-template.dox @@ -96,4 +96,4 @@ the HDF5 User's Guide. + in the HDF5 User Guide. - -h3 { display: block; - margin-top: 8px; - margin-bottom: 8px; - margin-left: 0px; - margin-right: 0px; - text-indent: 0px; - } - -h4 { display: block; - margin-top: 8px; - margin-bottom: 8px; - margin-left: 0px; - margin-right: 0px; - text-indent: 0px; - } - -p { display: block; +the hr tags. -->h3 { + display: block; + margin-top: 8px; + margin-bottom: 8px; + margin-left: 0px; + margin-right: 0px; + text-indent: 0px; +} + +h4 { + display: block; margin-top: 8px; margin-bottom: 8px; margin-left: 0px; margin-right: 0px; text-indent: 0px; - } +} + +p { + display: block; + margin-top: 8px; + margin-bottom: 8px; + margin-left: 0px; + margin-right: 0px; + text-indent: 0px; +} + + - - -table.format { border:solid; - border-collapse:collapse; - caption-side:top; - text-align:center; - width:80%; - } -table.format th { border:ridge; - padding:4px; - width:25%; - } -table.format td { border:ridge; - padding:4px; - } -table.format caption { font-weight:bold; - font-size:larger; - } - -table.note {border:none; - text-align:right; - width:80%; - } - -table.desc { border:solid; - border-collapse:collapse; - caption-size:top; - text-align:left; - width:80%; - } -table.desc tr { vertical-align:top; - } -table.desc th { border-style:ridge; - font-size:larger; - padding:4px; - - } -table.desc td { border-style:ridge; - - vertical-align:text-top; - } -table.desc caption { font-weight:bold; - font-size:larger; - } - -table.list { border:none; - width:100% - } -table.list tr { vertical-align:text-top; - } -table.list th { border:none; - text-decoration:underline; - vertical-align:text-top; - } -table.list td { border:none; - vertical-align:text-top; - } - -table.msgdesc { border:none; - text-align:left; - width: 80% - } -table.msgdesc tr { 
vertical-align:text-top; - border-spacing:0; - padding:0; } -table.msgdesc th { border:none; - text-decoration:underline; - vertical-align:text-top; } -table.msgdesc td { border:none; - vertical-align:text-top; - } - -table.list80 { border:none; - width:80% - } -table.list80 tr { vertical-align:text-top; - } -table.list80 th { border:none; - text-decoration:underline; - vertical-align:text-top; - } -table.list80 td { border:none; - vertical-align:text-top; - } - -table.glossary { border:none; - text-align:left; - width: 80% - } -table.glossary tr { vertical-align:text-top; - border-spacing:0; - padding:0; } -table.glossary th { border:none; - text-align:left; - text-decoration:underline; - vertical-align:text-top; } -table.glossary td { border:none; - text-align:left; - vertical-align:text-top; - } - -div { page-break-inside:avoid; - page-break-after:auto - } +p.item2 { + margin-left: 2em; + text-indent: 2em +} +--> +table.format { + border: solid; + border-collapse: collapse; + caption-side: top; + text-align: center; + width: 80%; +} + +table.format th { + border: ridge; + padding: 4px; + width: 25%; +} + +table.format td { + border: ridge; + padding: 4px; +} + +table.format caption { + font-weight: bold; + font-size: larger; +} + +table.note { + border: none; + text-align: right; + width: 80%; +} + +table.desc { + border: solid; + border-collapse: collapse; + caption-size: top; + text-align: left; + width: 80%; +} + +table.desc tr { + vertical-align: top; +} + +table.desc th { + border-style: ridge; + font-size: larger; + padding: 4px; +} + +table.desc td { + border-style: ridge; + vertical-align: text-top; +} + +table.desc caption { + font-weight: bold; + font-size: larger; +} + +table.list { + border: none; + width: 100% +} + +table.list tr { + vertical-align: text-top; +} + +table.list th { + border: none; + text-decoration: underline; + vertical-align: text-top; +} + +table.list td { + border: none; + vertical-align: text-top; +} + +table.msgdesc { + border: 
none; + text-align: left; + width: 80% +} + +table.msgdesc tr { + vertical-align: text-top; + border-spacing: 0; + padding: 0; +} + +table.msgdesc th { + border: none; + text-decoration: underline; + vertical-align: text-top; +} + +table.msgdesc td { + border: none; + vertical-align: text-top; +} + +table.list80 { + border: none; + width: 80% +} + +table.list80 tr { + vertical-align: text-top; +} + +table.list80 th { + border: none; + text-decoration: underline; + vertical-align: text-top; +} + +table.list80 td { + border: none; + vertical-align: text-top; +} + +table.glossary { + border: none; + text-align: left; + width: 80% +} + +table.glossary tr { + vertical-align: text-top; + border-spacing: 0; + padding: 0; +} + +table.glossary th { + border: none; + text-align: left; + text-decoration: underline; + vertical-align: text-top; +} + +table.glossary td { + border: none; + text-align: left; + vertical-align: text-top; +} + +div { + page-break-inside: avoid; + page-break-after: auto +} -
+
- - + - - - - + + + + -
-
    -
  1. Introduction
  2. - -
      -
    1. This Document
    2. -
    3. Changes for HDF5 1.10
    4. -
    -
    - -
  3. Disk Format: Level 0 - File Metadata
  4. - -
      -
    1. Disk Format: Level 0A - Format Signature and Superblock
    2. -
    3. Disk Format: Level 0B - File Driver Info
    4. -
    5. Disk Format: Level 0C - Superblock Extension
    6. -
    -
    -
  5. Disk Format: Level 1 - File Infrastructure
  6. - -
      -
    1. Disk Format: Level 1A - B-trees and B-tree - Nodes
    2. -
        -
      1. Disk Format: Level 1A1 - Version 1 - B-trees (B-link Trees)
      2. -
      3. Disk Format: Level 1A2 - Version 2 - B-trees
      4. -
      -
    3. Disk Format: Level 1B - Group Symbol Table Nodes
    4. -
    5. Disk Format: Level 1C - Symbol Table Entry
    6. -
    7. Disk Format: Level 1D - Local Heaps
    8. -
    9. Disk Format: Level 1E - Global Heap
    10. -
    11. Disk Format: Level 1F - Fractal Heap
    12. -
    13. Disk Format: Level 1G - Free-space Manager
    14. -
    15. Disk Format: Level 1H - Shared Object Header Message Table
    16. -
    -
    -
  7. Disk Format: Level 2 - Data Objects
  8. - -
      -
    1. Disk Format: Level 2A - Data Object Headers
    2. -
        -
      1. Disk Format: Level 2A1 - Data Object Header Prefix
      2. -
          -
        1. Version 1 Data Object Header Prefix
        2. -
        3. Version 2 Data Object Header Prefix
        4. -
        -
      3. Disk Format: Level 2A2 - Data Object Header Messages
      4. -
          -
        1. The NIL Message
        2. -
        3. The Dataspace Message
        4. -
        5. The Link Info Message
        6. +
+
    +
  1. Introduction
  2. + +
      +
    1. This Document
    2. +
    3. Changes for HDF5 1.10
    4. +
    +
    + +
  3. Disk Format: Level 0 - File + Metadata
  4. + +
      +
    1. Disk Format: Level 0A - Format + Signature and Superblock
    2. +
    3. Disk Format: Level 0B - File + Driver Info
    4. +
    5. Disk Format: Level 0C - + Superblock Extension
    6. +
    +
    +
  5. Disk Format: Level 1 - File + Infrastructure
  6. + +
      +
    1. Disk Format: Level 1A - B-trees + and B-tree Nodes
    2. +
        +
      1. Disk Format: Level 1A1 - + Version 1 B-trees (B-link Trees)
      2. +
      3. Disk Format: Level 1A2 - + Version 2 B-trees
      4. +
      +
    3. Disk Format: Level 1B - Group + Symbol Table Nodes
    4. +
    5. Disk Format: Level 1C - + Symbol Table Entry
    6. +
    7. Disk Format: Level 1D - Local + Heaps
    8. +
    9. Disk Format: Level 1E - Global + Heap
    10. +
    11. Disk Format: Level 1F - + Fractal Heap
    12. +
    13. Disk Format: Level 1G - + Free-space Manager
    14. +
    15. Disk Format: Level 1H - Shared + Object Header Message Table
    16. +
    +
    +
  7. Disk Format: Level 2 - Data + Objects
  8. + +
      +
    1. Disk Format: Level 2A - Data + Object Headers
    2. +
        +
      1. Disk Format: Level + 2A1 - Data Object Header Prefix
      2. +
          +
        1. Version 1 Data + Object Header Prefix
        2. +
        3. Version 2 Data + Object Header Prefix
        4. +
        +
      3. Disk Format: Level + 2A2 - Data Object Header Messages
      4. +
          +
        1. The NIL Message
        2. + +
        3. The Dataspace Message
        4. + +
        5. The Link Info Message
        6. + +
        +
      +
    +
- - - - -
  -
    -
  1. Disk Format: Level 2 - Data - Objects (Continued)
  2. -
      -
    1. Disk Format: Level 2A - Data Object - Headers (Continued)
    2. -
        -
      1. Disk Format: Level 2A2 - - Data Object Header Messages (Continued)
      2. -
          -
        1. The Datatype Message
        2. -
        3. The Data Storage - - Fill Value (Old) Message
        4. -
        5. The Data Storage - - Fill Value Message
        6. -
        7. The Link Message
        8. -
        9. The Data Storage - - External Data Files Message
        10. -
        11. The Data Storage - - Layout Message
        12. -
        13. The Bogus Message
        14. -
        15. The Group Info - Message
        16. -
        17. The Data Storage - - Filter Pipeline Message
        18. -
        19. The Attribute - Message
        20. -
        21. The Object Comment - Message
        22. -
        23. The Object - Modification Time (Old) Message
        24. -
        25. The Shared Message - Table Message
        26. -
        27. The Object Header - Continuation Message
        28. -
        29. The Symbol - Table Message
        30. -
        31. The Object - Modification Time Message
        32. -
        33. The B-tree - ‘K’ Values Message
        34. -
        35. The Driver Info - Message
        36. -
        37. The Attribute Info - Message
        38. -
        39. The Object Reference - Count Message
        40. -
        41. The File Space Info - Message
        42. +
  +
    +
  1. Disk Format: Level 2 - Data + Objects (Continued)
  2. +
      +
    1. Disk Format: Level 2A - Data + Object Headers (Continued)
    2. +
        +
      1. Disk Format: Level + 2A2 - Data Object Header Messages (Continued)
      2. +
          +
        1. The Datatype Message
        2. + +
        3. The Data Storage - + Fill Value (Old) Message
        4. + +
        5. The Data Storage - Fill + Value Message
        6. + +
        7. The Link Message
        8. + +
        9. The Data Storage + - External Data Files Message
        10. + +
        11. The Data Storage - Layout + Message
        12. + +
        13. The Bogus Message
        14. + +
        15. The Group Info Message
        16. + +
        17. The Data Storage - Filter + Pipeline Message
        18. + +
        19. The Attribute Message
        20. + +
        21. The Object Comment + Message
        22. + +
        23. The Object + Modification Time (Old) Message
        24. + +
        25. The Shared Message + Table Message
        26. + +
        27. The Object Header + Continuation Message
        28. + +
        29. The Symbol Table + Message
        30. + +
        31. The Object + Modification Time Message
        32. + +
        33. The B-tree + ‘K’ Values Message
        34. + +
        35. The Driver Info Message
        36. + +
        37. The Attribute Info Message
        38. + +
        39. The Object Reference + Count Message
        40. + +
        41. The File Space Info + Message
        42. + +
        +
      +
    3. Disk Format: Level 2B - Data + Object Data Storage
    4. +
    + +
  3. Appendix A: Definitions
  4. +
  5. Appendix B: File Memory + Allocation Types
- -
  • Disk Format: Level 2B - Data Object Data Storage
  • - - -
  • Appendix A: Definitions
  • -
  • Appendix B: File Memory Allocation Types
  • - -
    +
    @@ -293,14610 +396,14857 @@ div { page-break-inside:avoid;

    I. Introduction

    - - - - - - -
      -
    - HDF5 Groups -
     
      - Figure 1: Relationships among the HDF5 root group, other groups, and objects -
    -
     
      - HDF5 Objects -  
      - Figure 2: HDF5 objects -- datasets, datatypes, or dataspaces -
    -
     
    - - -

    The format of an HDF5 file on disk encompasses several - key ideas of the HDF4 and AIO file formats as well as - addressing some shortcomings therein. The new format is - more self-describing than the HDF4 format and is more - uniformly applied to data objects in the file.

    - -

    An HDF5 file appears to the user as a directed graph. - The nodes of this graph are the higher-level HDF5 objects - that are exposed by the HDF5 APIs:

    - -
      -
    • Groups
    • -
    • Datasets
    • -
    • Committed (formerly Named) datatypes
    • -
    - -

    At the lowest level, as information is actually written to the disk, - an HDF5 file is made up of the following objects:

    -
      -
    • A superblock
    • -
    • B-tree nodes
    • -
    • Heap blocks
    • -
    • Object headers
    • -
    • Object data
    • -
    • Free space
    • -
    - -

    The HDF5 Library uses these low-level objects to represent the - higher-level objects that are then presented to the user or - to applications through the APIs. For instance, a group is an - object header that contains a message that points to a local - heap (for storing the links to objects in the group) and to a - B-tree (which indexes the links). A dataset is an object header - that contains messages that describe datatype, dataspace, layout, - filters, external files, fill value, and other elements with the - layout message pointing to either a raw data chunk or to a - B-tree that points to raw data chunks.

    + + + + + + + + + + + + + + + + + + + + + + +
      +
    HDF5 Groups +
     
     Figure 1: Relationships among + the HDF5 root group, other groups, and objects +
     
     HDF5 Objects 
     Figure 2: HDF5 objects -- + datasets, datatypes, or dataspaces +
     
    + + +

    The format of an HDF5 file on disk encompasses several key ideas + of the HDF4 and AIO file formats as well as addressing some + shortcomings therein. The new format is more self-describing than the + HDF4 format and is more uniformly applied to data objects in the file.

    + +

    An HDF5 file appears to the user as a directed graph. The nodes + of this graph are the higher-level HDF5 objects that are exposed by the + HDF5 APIs:

    + +
      +
    • Groups
    • +
    • Datasets
    • +
    • Committed (formerly Named) datatypes
    • +
    + +

    At the lowest level, as information is actually written to the + disk, an HDF5 file is made up of the following objects:

    +
      +
    • A superblock
    • +
    • B-tree nodes
    • +
    • Heap blocks
    • +
    • Object headers
    • +
    • Object data
    • +
    • Free space
    • +
    + +

    The HDF5 Library uses these low-level objects to represent the + higher-level objects that are then presented to the user or to + applications through the APIs. For instance, a group is an object + header that contains a message that points to a local heap (for storing + the links to objects in the group) and to a B-tree (which indexes the + links). A dataset is an object header that contains messages that + describe datatype, dataspace, layout, filters, external files, fill + value, and other elements with the layout message pointing to either a + raw data chunk or to a B-tree that points to raw data chunks.


    I.A. This Document

    -

    This document describes the lower-level data objects; - the higher-level objects and their properties are described - in the HDF5 User’s Guide.

    - -

Three levels of information comprise the file format. - Level 0 contains basic information for identifying and - defining information about the file. Level 1 information contains - the information about the pieces of a file shared by many objects - in the file (such as B-trees and heaps). Level 2 is the rest - of the file and contains all of the data objects, with each object - partitioned into header information, also known as - metadata, and data.

    - -

    The sizes of various fields in the following layout tables are - determined by looking at the number of columns the field spans - in the table. There are three exceptions: (1) The size may be - overridden by specifying a size in parentheses, (2) the size of - addresses is determined by the Size of Offsets field - in the superblock and is indicated in this document with a - superscripted ‘O’, and (3) the size of length fields is determined - by the Size of Lengths field in the superblock and is - indicated in this document with a superscripted ‘L’.

    - -

    Values for all fields in this document should be treated as unsigned - integers, unless otherwise noted in the description of a field. - Additionally, all metadata fields are stored in little-endian byte - order. -

    - -

    All checksums used in the format are computed with the - Jenkins’ - lookup3 algorithm. -

    - -

    Whenever a bit flag or field is mentioned for an entry, bits are - numbered from the lowest bit position in the entry. -

    - -

Various tables in this document are aligned with “This space inserted - only to align table nicely”. These entries in the table are just - to make the table presentation nicer and do not represent any values - or padding in the file. -

    +

    + This document describes the lower-level data objects; the higher-level + objects and their properties are described in the HDF5 + User Guide. +

    + +

+ Three levels of information comprise the file format. Level 0 contains + basic information for identifying and defining information about the + file. Level 1 information contains the information about the pieces of + a file shared by many objects in the file (such as B-trees and + heaps). Level 2 is the rest of the file and contains all of the data + objects, with each object partitioned into header information, also + known as metadata, and data. +

    + +

    + The sizes of various fields in the following layout tables are + determined by looking at the number of columns the field spans in the + table. There are three exceptions: (1) The size may be overridden by + specifying a size in parentheses, (2) the size of addresses is + determined by the Size of Offsets field in the superblock and + is indicated in this document with a superscripted ‘O’, and + (3) the size of length fields is determined by the Size of + Lengths field in the superblock and is indicated in this document with + a superscripted ‘L’. +

    + +

    Values for all fields in this document should be treated as + unsigned integers, unless otherwise noted in the description of a + field. Additionally, all metadata fields are stored in little-endian + byte order.

    + +

    + All checksums used in the format are computed with the Jenkins’ + lookup3 algorithm. +

    + +

    Whenever a bit flag or field is mentioned for an entry, bits are + numbered from the lowest bit position in the entry.

    + +

Various tables in this document are aligned with “This space + inserted only to align table nicely”. These entries in the table + are just to make the table presentation nicer and do not represent any + values or padding in the file.


    I.B. Changes for HDF5 1.10

    -

    As of October 2015, changes in the file format for HDF5 1.10 - have not yet been finalized.

    +

    As of October 2015, changes in the file format for HDF5 1.10 have + not yet been finalized.




    -

    -II. Disk Format: Level 0 - File Metadata

    - -
    -

    -II.A. Disk Format: Level 0A - Format Signature and Superblock

    - -

    The superblock may begin at certain predefined offsets within - the HDF5 file, allowing a block of unspecified content for - users to place additional information at the beginning (and - end) of the HDF5 file without limiting the HDF5 Library’s - ability to manage the objects within the file itself. This - feature was designed to accommodate wrapping an HDF5 file in - another file format or adding descriptive information to an HDF5 - file without requiring the modification of the actual file’s - information. The superblock is located by searching for the - HDF5 format signature at byte offset 0, byte offset 512, and at - successive locations in the file, each a multiple of two of - the previous location; in other words, at these byte offsets: - 0, 512, 1024, 2048, and so on.

    - -

    The superblock is composed of the format signature, followed by a - superblock version number and information that is specific to each - version of the superblock. - Currently, there are three versions of the superblock format. - Version 0 is the default format, while version 1 is basically the same - as version 0 with additional information when a non-default B-tree ‘K’ - value is stored. Version 2 is the latest format, with some fields - eliminated or compressed and with superblock extension and checksum - support.

    - -

    Version 0 and 1 of the superblock are described below:

    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Superblock (Versions 0 and 1) -
    bytebytebytebyte

    Format Signature (8 bytes)

    Version # of SuperblockVersion # of File’s Free Space StorageVersion # of Root Group Symbol Table EntryReserved (zero)
    Version # of Shared Header Message FormatSize of OffsetsSize of LengthsReserved (zero)
    Group Leaf Node KGroup Internal Node K
    File Consistency Flags
    Indexed Storage Internal Node K1Reserved (zero)1

    Base AddressO


    Address of File Free space InfoO


    End of File AddressO


    Driver Information Block AddressO

    Root Group Symbol Table Entry
    - - - - - - - - -
      - (Items marked with an ‘O’ in the above table are - of the size specified in “Size of Offsets.”) -
      - (Items marked with a ‘1’ in the above table are - new in version 1 of the superblock) -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Format Signature

    This field contains a constant value and can be used to - quickly identify a file as being an HDF5 file. The - constant value is designed to allow easy identification of - an HDF5 file and to allow certain types of data corruption - to be detected. The file signature of an HDF5 file always - contains the following values:

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Decimal:13772687013102610
    Hexadecimal:894844460d0a1a0a
    ASCII C Notation:\211HDF\r\n\032\n
    -
    -

    This signature both identifies the file as an HDF5 file - and provides for immediate detection of common - file-transfer problems. The first two bytes distinguish - HDF5 files on systems that expect the first two bytes to - identify the file type uniquely. The first byte is - chosen as a non-ASCII value to reduce the probability - that a text file may be misrecognized as an HDF5 file; - also, it catches bad file transfers that clear bit - 7. Bytes two through four name the format. The CR-LF - sequence catches bad file transfers that alter newline - sequences. The control-Z character stops file display - under MS-DOS. The final line feed checks for the inverse - of the CR-LF translation problem. (This is a direct - descendent of the - PNG file - signature.)

    -

    This field is present in version 0+ of the superblock. -

    Version Number of the Superblock

    This value is used to determine the format of the - information in the superblock. When the format of the - information in the superblock is changed, the version number - is incremented to the next integer and can be used to - determine how the information in the superblock is - formatted.

    - -

    Values of 0, 1 and 2 are defined for this field. (The format - of version 2 is described below, not here) -

    - -

    This field is present in version 0+ of the superblock. -

    -

    Version Number of the File’s Free Space - Information

    -

    This value is used to determine the format of the - file’s free space information. -

    -

    The only value currently valid in this field is ‘0’, which - indicates that the file’s free space is as described - below. -

    - -

    This field is present in version 0 and 1 of the superblock. -

    -

    Version Number of the Root Group Symbol Table - Entry

    This value is used to determine the format of the - information in the Root Group Symbol Table Entry. When the - format of the information in that field is changed, the - version number is incremented to the next integer and can be - used to determine how the information in the field - is formatted.

    -

    The only value currently valid in this field is ‘0’, - which indicates that the root group symbol table entry is - formatted as described below.

    -

    This field is present in version 0 and 1 of the - superblock.

    -

    Version Number of the Shared Header Message Format

    This value is used to determine the format of the - information in a shared object header message. Since the format - of the shared header messages differs from the other private - header messages, a version number is used to identify changes - in the format. -

    -

    The only value currently valid in this field is ‘0’, which - indicates that shared header messages are formatted as - described below. -

    - -

    This field is present in version 0 and 1 of the superblock. -

    -

    Size of Offsets

    This value contains the number of bytes used to store - addresses in the file. The values for the addresses of - objects in the file are offsets relative to a base address, - usually the address of the superblock signature. This - allows a wrapper to be added after the file is created - without invalidating the internal offset locations. -

    - -

    This field is present in version 0+ of the superblock. -

    -

    Size of Lengths

    This value contains the number of bytes used to store - the size of an object. -

    -

    This field is present in version 0+ of the superblock. -

    -

    Group Leaf Node K

    -

    Each leaf node of a group B-tree will have at - least this many entries but not more than twice this - many. If a group has a single leaf node then it - may have fewer entries. -

    -

    This value must be greater than zero. -

    -

    See the description of B-trees below. -

    - -

    This field is present in version 0 and 1 of the superblock. -

    -

    Group Internal Node K

    -

    Each internal node of a group B-tree will have at - least this many entries but not more than twice this - many. If the group has only one internal - node then it might have fewer entries. -

    -

    This value must be greater than zero. -

    -

    See the description of B-trees below. -

    - -

    This field is present in version 0 and 1 of the superblock. -

    -

    File Consistency Flags

    -

    This value contains flags to indicate information - about the consistency of the information contained - within the file. Currently, the following bit flags are - defined: -

      -
    • Bit 0 set indicates that the file is opened for - write-access.
    • -
    • Bit 1 set indicates that the file has - been verified for consistency and is guaranteed to be - consistent with the format defined in this document.
    • -
    • Bits 2-31 are reserved for future use.
    • -
    - Bit 0 should be - set as the first action when a file is opened for write - access and should be cleared only as the final action - when closing a file. Bit 1 should be cleared during - normal access to a file and only set after the file’s - consistency is guaranteed by the library or a - consistency utility. -

    - -

    This field is present in version 0+ of the superblock. -

    -

    Indexed Storage Internal Node K

    -

    Each internal node of an indexed storage B-tree will have at - least this many entries but not more than twice this - many. If the index storage B-tree has only one internal - node then it might have fewer entries. -

    -

    This value must be greater than zero. -

    -

    See the description of B-trees below. -

    - -

    This field is present in version 1 of the superblock. -

    -

    Base Address

    -

    This is the absolute file address of the first byte of - the HDF5 data within the file. The library currently - constrains this value to be the absolute file address - of the superblock itself when creating new files; - future versions of the library may provide greater - flexibility. When opening an existing file and this address does - not match the offset of the superblock, the library assumes - that the entire contents of the HDF5 file have been adjusted in - the file and adjusts the base address and end of file address to - reflect their new positions in the file. Unless otherwise noted, - all other file addresses are relative to this base - address. -

    - -

    This field is present in version 0+ of the superblock. -

    -

    Address of Global Free-space Index

    -

    The file’s free space is not persistent for version 0 and 1 of - the superblock. - Currently this field always contains the - undefined address. -

    - -

    This field is present in version 0 and 1 of the superblock. -

    -

    End of File Address

    -

    This is the absolute file address of the first byte past - the end of all HDF5 data. It is used to determine whether a - file has been accidentally truncated and as an address where - file data allocation can occur if space from the free list is - not used. -

    - -

    This field is present in version 0+ of the superblock. -

    -

    Driver Information Block Address

    -

    This is the relative file address of the file driver - information block which contains driver-specific - information needed to reopen the file. If there is no - driver information block then this entry should be the - undefined address. -

    - -

    This field is present in version 0 and 1 of the superblock. -

    -

    Root Group Symbol Table Entry

    -

    This is the symbol table entry - of the root group, which serves as the entry point into - the group graph for the file. -

    - -

    This field is present in version 0 and 1 of the superblock. -

    -
    -
    +

    + II. Disk Format: Level 0 - File Metadata +

    -
    -

    Version 2 of the superblock is described below:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Superblock (Version 2) -
    bytebytebytebyte

    Format Signature (8 bytes)

    Version # of SuperblockSize of OffsetsSize of LengthsFile Consistency Flags

    Base AddressO


    Superblock Extension AddressO


    End of File AddressO


    Root Group Object Header AddressO

    Superblock Checksum
    +
    +

    + II.A. Disk Format: Level 0A - Format + Signature and Superblock +

    - - - - -
      - (Items marked with an ‘O’ in the above table are - of the size specified in “Size of Offsets.”) -
    +

    The superblock may begin at certain predefined offsets within the + HDF5 file, allowing a block of unspecified content for users to place + additional information at the beginning (and end) of the HDF5 file + without limiting the HDF5 Library’s ability to manage the objects + within the file itself. This feature was designed to accommodate + wrapping an HDF5 file in another file format or adding descriptive + information to an HDF5 file without requiring the modification of the + actual file’s information. The superblock is located by searching + for the HDF5 format signature at byte offset 0, byte offset 512, and at + successive locations in the file, each a multiple of two of the + previous location; in other words, at these byte offsets: 0, 512, 1024, + 2048, and so on.

    -
    +

    The superblock is composed of the format signature, followed by a + superblock version number and information that is specific to each + version of the superblock. Currently, there are three versions of the + superblock format. Version 0 is the default format, while version 1 is + basically the same as version 0 with additional information when a + non-default B-tree ‘K’ value is stored. Version 2 is the + latest format, with some fields eliminated or compressed and with + superblock extension and checksum support.

    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +

    Version 0 and 1 of the superblock are described below:

    -
    Field NameDescription

    Format Signature

    -

    This field is the same as described for versions 0 and 1 of the - superblock. -

    Version Number of the Superblock

    -

    This field has a value of 2 and has the same meaning as for - versions 0 and 1. -

    -

    Size of Offsets

    -

    This field is the same as described for versions 0 and 1 of the - superblock. -

    -

    Size of Lengths

    -

    This field is the same as described for versions 0 and 1 of the - superblock. -

    -

    File Consistency Flags

    -

    This field is the same as described for versions 0 and 1 except - that it is smaller (the number of reserved bits has been reduced - from 30 to 6). -

    -

    Base Address

    -

    This field is the same as described for versions 0 and 1 of the - superblock. -

    -

    Superblock Extension Address

    -

    The field is the address of the object header for the - superblock extension. - If there is no extension then this entry should be the - undefined address. -

    -

    End of File Address

    -

    This field is the same as described for versions 0 and 1 of the - superblock. -

    -

    Root Group Object Header Address

    -

    This is the address of - the root group object header, - which serves as the entry point into the group graph for the file. -

    -

    Superblock Checksum

    -

    The checksum for the superblock. -

    -
    -
    -
    -

    -II.B. Disk Format: Level 0B - File Driver Info

    +
    + + -

    The driver information block is an optional region of the - file which contains information needed by the file driver - to reopen a file. The format is described below:

    + + + + + + + + + -
    -
    Superblock (Versions 0 and 1)
    bytebytebytebyte

    Format Signature (8 bytes)
    +
    - + + + + + + - - - - - + + + + + - - + + - + - + + - + -
    - Driver Information Block -
    Version # of SuperblockVersion # of File’s Free Space StorageVersion # of Root Group Symbol Table EntryReserved (zero)
    bytebytebytebyte
    Version # of Shared Header Message FormatSize of OffsetsSize of LengthsReserved (zero)
    VersionReservedGroup Leaf Node KGroup Internal Node K
    Driver Information SizeFile Consistency Flags

    Driver Identification (8 bytes)

    Indexed Storage Internal + Node K1 + Reserved (zero)1


    Driver Information (variable size)



    Base AddressO
    +
    -
    -
    -
    - - - - + + - - + - - + - - + +
    Field NameDescription

    Address of File Free space InfoO
    +

    Version

    -

    The version number of the Driver Information Block. - This document describes version 0. -

    -

    End of File AddressO
    +

    Driver Information Size

    -

    The size in bytes of the Driver Information field. -

    -

    Driver Information Block AddressO
    +

    Driver Identification

    -

    This is an eight-byte ASCII string without null - termination which identifies the driver and/or version number - of the Driver Information Block. The predefined driver encoded - in this field by the HDF5 Library is identified by the - letters NCSA followed by the first four characters of - the driver name. If the Driver Information block is not - the original version then the last letter(s) of the - identification will be replaced by a version number in - ASCII, starting with 0. -

    -

    - Identification for user-defined drivers is also eight-byte long. - It can be arbitrary but should be unique to avoid - the four character prefix “NCSA”. -

    -
    Root Group Symbol Table Entry
    -

    Driver Information

    Driver information is stored in a format defined by the - file driver (see description below).
    + + + -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets.”)
    - + +   + (Items marked with a ‘1’ in the above table are + new in version 1 of the superblock) + + + -
    - The two drivers encoded in the Driver Identification field are as follows: - -

    The format of the Driver Information field for the - above two drivers are described below:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
    +
    +
    - Multi Driver Information -
    bytebytebytebyte
    Member MappingMember MappingMember MappingMember Mapping
    Member MappingMember MappingReservedReserved

    Address of Member File 1


    End of Address for Member File 1


    Address of Member File 2


    End of Address for Member File 2


    ... ...


    Address of Member File N


    End of Address for Member File N


    Name of Member File 1 (variable size)


    Name of Member File 2 (variable size)


    ... ...


    Name of Member File N (variable size)

    + + + + + + + + + + + + + + + + + + -
    -
    -
    Field NameDescription

    Format Signature

    This field contains a constant value and can be used + to quickly identify a file as being an HDF5 file. The constant + value is designed to allow easy identification of an HDF5 file and + to allow certain types of data corruption to be detected. The file + signature of an HDF5 file always contains the following values:

    +
    + + + + + + + + + + + + -
    Decimal:13772687013102610
    - +
    Hexadecimal:894844460d0a1a0a
    - - - - + + + + + + + + + + + +
    Field NameDescription
    ASCII C Notation:\211HDF\r\n\032\n
    + +

    + This signature both identifies the file as an HDF5 file and + provides for immediate detection of common file-transfer problems. + The first two bytes distinguish HDF5 files on systems that expect + the first two bytes to identify the file type uniquely. The first + byte is chosen as a non-ASCII value to reduce the probability that + a text file may be misrecognized as an HDF5 file; also, it catches + bad file transfers that clear bit 7. Bytes two through four name + the format. The CR-LF sequence catches bad file transfers that + alter newline sequences. The control-Z character stops file display + under MS-DOS. The final line feed checks for the inverse of the + CR-LF translation problem. (This is a direct descendent of the PNG + file signature.) +

    +

    + This field is present in version 0+ of the superblock. +

    + - -

    Member Mapping

    -

    These fields are integer values from 1 to 6 - indicating how the data can be mapped to or merged with another type of - data. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Member MappingDescription
    1The superblock data.
    2The B-tree data.
    3The raw data.
    4The global heap data.
    5The local heap data.
    6The object header data.

    -

    For example, if the third field has the value 3 and all the rest have the - value 1, it means there are two files: one for raw data, and one for superblock, - B-tree, global heap, local heap, and object header.

    - - - - -

    Reserved

    -

    These fields are reserved and should always be zero.

    - - - -

    Address of Member File N

    -

This field specifies the virtual address at which the member file starts.

    -

    N is the number of member files.

    - - - - -

    End of Address for Member File N

    -

    This field is the end of the allocated address for the member file. -

    - - - -

    Name of Member File N

    -

    This field is the null-terminated name of the member file and - its length should be multiples of 8 bytes. - Additional bytes will be padded with NULLs. The default naming - convention is %s-X.h5, where X is one of the letters - s (for superblock), b (for B-tree), r (for raw data), - g (for global heap), l (for local heap), and o (for - object header). The name of the whole HDF5 file will substitute the %s - in the string. -

    - - - -
    + +

    Version Number of the Superblock

    +

    This value is used to determine the format of the + information in the superblock. When the format of the information + in the superblock is changed, the version number is incremented to + the next integer and can be used to determine how the information + in the superblock is formatted.

    -
    -
    - - - - - - - - - - - - - +

    Values of 0, 1 and 2 are defined for this field. (The format + of version 2 is described below, not here)

    -
    - Family Driver Information -
    bytebytebytebyte

    Size of Member File

    -
    +

    + This field is present in version 0+ of the superblock. +

    + -
    -
    - - - - - - - - - - -
    Field NameDescription

    Size of Member File

    This field is the size of the member file in the family of files.

    -
    + +

    Version Number of the File’s Free Space + Information

    + +

    This value is used to determine the format of the + file’s free space information.

    +

    + The only value currently valid in this field is ‘0’, + which indicates that the file’s free space is as described below. +

    -
    -

    -II.C. Disk Format: Level 0C - Superblock Extension

    +

    + This field is present in version 0 and 1 of the + superblock. +

    + + -

    The superblock extension is used to store superblock metadata - which is either optional, or added after the version of the superblock - was defined. Superblock extensions may only exist when version 2+ of - superblock is used. A superblock extension is an object header which may - hold the following messages:

    - + +

    Version Number of the Root Group Symbol Table Entry

    +

    This value is used to determine the format of the + information in the Root Group Symbol Table Entry. When the format + of the information in that field is changed, the version number is + incremented to the next integer and can be used to determine how + the information in the field is formatted.

    +

    + The only value currently valid in this field is ‘0’, + which indicates that the root group symbol table entry is formatted + as described below. +

    +

    + This field is present in version 0 and 1 of the + superblock. +

    + + + +

    Version Number of the Shared Header Message Format

    +

    This value is used to determine the format of the + information in a shared object header message. Since the format of + the shared header messages differs from the other private header + messages, a version number is used to identify changes in the + format.

    +

    + The only value currently valid in this field is ‘0’, + which indicates that shared header messages are formatted as + described below. +

    +

    + This field is present in version 0 and 1 of the + superblock. +

    + + +

    Size of Offsets

    +

    This value contains the number of bytes used to store + addresses in the file. The values for the addresses of objects in + the file are offsets relative to a base address, usually the + address of the superblock signature. This allows a wrapper to be + added after the file is created without invalidating the internal + offset locations.

    -
    -
    -
    -

    -III. Disk Format: Level 1 - File Infrastructure

    - -
    -

    -III.A. Disk Format: Level 1A - B-trees and B-tree Nodes

    - -

    B-trees allow flexible storage for objects which tend to grow - in ways that cause the object to be stored discontiguously. B-trees - are described in various algorithms books including “Introduction to - Algorithms” by Thomas H. Cormen, Charles E. Leiserson, and Ronald - L. Rivest. B-trees are used in several places in the HDF5 file format, - when an index is needed for another data structure.

    - -

The version 1 B-tree structure described below is the original index - structure, but is limited by some bugs in our implementation (mainly in - how it handles deleting records). The version 1 B-trees are being phased - out in favor of the version 2 B-trees described below, although both - types of structures may be found in the same file, depending on - application settings when creating the file.

    - -
    -

    -III.A.1. Disk Format: Level 1A1 - Version 1 B-trees (B-link Trees)

    - -

Version 1 B-trees in HDF5 files are an implementation of the B-link tree, - in which the sibling nodes at a particular level in the tree are stored - in a doubly-linked list, which is described in the “Efficient Locking for - Concurrent Operations on B-trees” paper by Phillip Lehman and S. Bing Yao - as published in the ACM Transactions on Database Systems, - Vol. 6, No. 4, December 1981.

    - -

    The B-link trees implemented by the file format contain one more - key than the number of children. In other words, each child - pointer out of a B-tree node has a left key and a right key. - The pointers out of internal nodes point to sub-trees while - the pointers out of leaf nodes point to symbol nodes and - raw data chunks. - Aside from that difference, internal nodes and leaf nodes - are identical.

    - -
    - - +

    + This field is present in version 0+ of the superblock. +

    + - - - - + + - + + - - - + + - + + - + + - + + - + + - + + - + + - + + +
    - B-link Tree Nodes -
    bytebytebytebyte

    Size of Lengths

    This value contains the number of bytes used to store + the size of an object.

    +

    + This field is present in version 0+ of the superblock. +

    Signature

    Group Leaf Node K

    +

    Each leaf node of a group B-tree will have at least this many + entries but not more than twice this many. If a group has a single + leaf node then it may have fewer entries.

    +

    This value must be greater than zero.

    +

    + See the description of B-trees below. +

    + +

    + This field is present in version 0 and 1 of the + superblock. +

    +
    Node TypeNode LevelEntries Used

    Group Internal Node K

    +

    Each internal node of a group B-tree will have at least this + many entries but not more than twice this many. If the group has + only one internal node then it might have fewer entries.

    +

    This value must be greater than zero.

    +

    + See the description of B-trees below. +

    + +

    + This field is present in version 0 and 1 of the + superblock. +

    +

    Address of Left SiblingO

    File Consistency Flags

    +

    This value contains flags to indicate information about the + consistency of the information contained within the file. + Currently, the following bit flags are defined:

    +
      +
    • Bit 0 set indicates that the file is opened for + write-access.
    • +
    • Bit 1 set indicates that the file has been verified for + consistency and is guaranteed to be consistent with the format + defined in this document.
    • +
    • Bits 2-31 are reserved for future use.
    • +
    Bit 0 should be set as the first action when a file is opened for + write access and should be cleared only as the final action when + closing a file. Bit 1 should be cleared during normal access to a + file and only set after the file’s consistency is guaranteed + by the library or a consistency utility. +

    + +

    + This field is present in version 0+ of the superblock. +

    +

    Address of Right SiblingO

    Indexed Storage Internal Node K

    +

    Each internal node of an indexed storage B-tree will have at + least this many entries but not more than twice this many. If the + index storage B-tree has only one internal node then it might have + fewer entries.

    +

    This value must be greater than zero.

    +

    + See the description of B-trees below. +

    + +

    + This field is present in version 1 of the superblock. +

    +
    Key 0 (variable size)

    Base Address

    +

    This is the absolute file address of the first byte of the + HDF5 data within the file. The library currently constrains this + value to be the absolute file address of the superblock itself when + creating new files; future versions of the library may provide + greater flexibility. When opening an existing file and this address + does not match the offset of the superblock, the library assumes + that the entire contents of the HDF5 file have been adjusted in the + file and adjusts the base address and end of file address to + reflect their new positions in the file. Unless otherwise noted, + all other file addresses are relative to this base address.

    + +

    + This field is present in version 0+ of the superblock. +

    +

    Address of Child 0O

    Address of Global Free-space Index

    +

    + The file’s free space is not persistent for version 0 and 1 + of the superblock. Currently this field always contains the undefined address. +

    + +

    + This field is present in version 0 and 1 of the + superblock. +

    +
    Key 1 (variable size)

    End of File Address

    +

    This is the absolute file address of the first byte past the + end of all HDF5 data. It is used to determine whether a file has + been accidentally truncated and as an address where file data + allocation can occur if space from the free list is not used.

    + +

    + This field is present in version 0+ of the superblock. +

    +

    Address of Child 1O

    Driver Information Block Address

    +

    + This is the relative file address of the file driver information + block which contains driver-specific information needed to reopen + the file. If there is no driver information block then this entry + should be the undefined address. +

    + +

    + This field is present in version 0 and 1 of the + superblock. +

    +
    ...

    Root Group Symbol Table Entry

    +

    + This is the symbol table entry of + the root group, which serves as the entry point into the group + graph for the file. +

    + +

    + This field is present in version 0 and 1 of the + superblock. +

    +
    +
    + +
    +

    Version 2 of the superblock is described below:

    + +
    + + - + + + + - + - + + + + -
    Superblock (Version 2)
    Key 2K (variable size)bytebytebytebyte

    Address of Child 2KO


    Format Signature (8 bytes)
    +
    Key 2K+1 (variable size)Version # of SuperblockSize of OffsetsSize of LengthsFile Consistency Flags
    - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    - -
    -
    - - - - - - - - - - - - - - - - - - - + - - - + + - - - + + - - - + + - - - + + +
    Field NameDescription

    Signature

    -

    The ASCII character string “TREE” is - used to indicate the - beginning of a B-link tree node. This gives file - consistency checking utilities a better chance of - reconstructing a damaged file. -

    -

    Node Type

    -

    Each B-link tree points to a particular type of data. - This field indicates the type of data as well as - implying the maximum degree K of the tree and - the size of each Key field. - - - - - - - - - - - - - - - -
    Node TypeDescription
    0This tree points to group nodes.
    1This tree points to raw data chunk nodes.

    -

    Node Level

    -

    The node level indicates the level at which this node - appears in the tree (leaf nodes are at level zero). Not - only does the level indicate whether child pointers - point to sub-trees or to data, but it can also be used - to help file consistency checking utilities reconstruct - damaged trees. -

    -

    Base AddressO
    +

    Entries Used

    -

    This determines the number of children to which this - node points. All nodes of a particular type of tree - have the same maximum degree, but most nodes will point - to less than that number of children. The valid child - pointers and keys appear at the beginning of the node - and the unused pointers and keys appear at the end of - the node. The unused pointers and keys have undefined - values. -

    -

    Superblock Extension AddressO
    +

    Address of Left Sibling

    -

    This is the relative file address of the left sibling of - the current node. If the current - node is the left-most node at this level then this field - is the undefined address. -

    -

    End of File AddressO
    +

    Address of Right Sibling

    -

    This is the relative file address of the right sibling of - the current node. If the current - node is the right-most node at this level then this - field is the undefined address. -

    -

    Root Group Object Header AddressO
    +

    Keys and Child Pointers

    -

    Each tree has 2K+1 keys with 2K - child pointers interleaved between the keys. The number - of keys and child pointers actually containing valid - values is determined by the node’s Entries Used field. - If that field is N then the B-link tree contains - N child pointers and N+1 keys. -

    -
    Superblock Checksum
    - -

    Key

    - -

    The format and size of the key values is determined by - the type of data to which this tree points. The keys are - ordered and are boundaries for the contents of the child - pointer; that is, the key values represented by child - N fall between Key N and Key - N+1. Whether the interval is open or closed on - each end is determined by the type of data to which the - tree points. -

    - -

    - The format of the key depends on the node type. - For nodes of node type 0 (group nodes), the key is formatted as - follows: - - - - - - -
    A single field of Size of Lengths - bytes:Indicates the byte offset into the local heap - for the first object name in the subtree which - that key describes. -
    -

    - - -

    - For nodes of node type 1 (chunked raw data nodes), the key is - formatted as follows: - - - - - - - - - - - - - - -
    Bytes 1-4:Size of chunk in bytes.
    Bytes 4-8:Filter mask, a 32-bit bit field indicating which - filters have been skipped for this chunk. Each filter - has an index number in the pipeline (starting at 0, with - the first filter to apply) and if that filter is skipped, - the bit corresponding to its index is set.
    (D + 1) 64-bit fields:The offset of the - chunk within the dataset where D is the number - of dimensions of the dataset, and the last value is the - offset within the dataset’s datatype and should always be - zero. For example, if - a chunk in a 3-dimensional dataset begins at the - position [5,5,5], there will be three - such 64-bit values, each with the value of - 5, followed by a 0 value.
    -

    - - + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets.”)
    - -

    Child Pointer

    - -

    The tree node contains file addresses of subtrees or - data depending on the node level. Nodes at Level 0 point - to data addresses, either raw data chunks or group nodes. - Nodes at non-zero levels point to other nodes of the - same B-tree. -

    -

    For raw data chunk nodes, the child pointer is the address - of a single raw data chunk. For group nodes, the child pointer - points to a symbol table, which contains - information for multiple symbol table entries. -

    - - - -
    - -

    - Conceptually, each B-tree node looks like this:

    -
    - - - - - - - - - - - - - -
    key[0] child[0] key[1] child[1] key[2] ... ... key[N-1] child[N-1] key[N]
    -
    -
    - - where child[i] is a pointer to a sub-tree (at a level - above Level 0) or to data (at Level 0). - Each key[i] describes an item stored by the B-tree - (a chunk or an object of a group node). The range of values - represented by child[i] is indicated by key[i] - and key[i+1]. - - -

    The following question must next be answered: - “Is the value described by key[i] contained in - child[i-1] or in child[i]?” - The answer depends on the type of tree. - In trees for groups (node type 0) the object described by - key[i] is the greatest object contained in - child[i-1] while in chunk trees (node type 1) the - chunk described by key[i] is the least chunk in - child[i].

    - -

    That means that key[0] for group trees is sometimes unused; - it points to offset zero in the heap, which is always the - empty string and compares as “less-than” any valid object name.

    - -

    And key[N] for chunk trees is sometimes unused; - it contains a chunk offset which compares as “greater-than” - any other chunk offset and has a chunk byte size of zero - to indicate that it is not actually allocated.

    - -
    -

    -III.A.2. Disk Format: Level 1A2 - Version 2 B-trees

    - -

    Version 2 B-trees are “traditional” B-trees, with one major difference. - Instead of just using a simple pointer (or address in the file) to a - child of an internal node, the pointer to the child node contains two - additional pieces of information: the number of records in the child - node itself, and the total number of records in the child node and - all its descendants. Storing this additional information allows fast - array-like indexing to locate the nth record in the B-tree.

    - -

    The entry into a version 2 B-tree is a header which contains global - information about the structure of the B-tree. The root node - address - field in the header points to the B-tree root node, which is either an - internal or leaf node, depending on the value in the header’s - depth field. An internal node consists of records plus - pointers to further leaf or internal nodes in the tree. A leaf node - consists of solely of records. The format of the records depends on - the B-tree type (stored in the header).

    - -
    - - + +
    +
    +
    - Version 2 B-tree Header -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    bytebytebytebyte
    Signature
    VersionTypeThis space inserted only to align table nicely
    Node Size
    Record SizeDepth
    Split PercentMerge PercentThis space inserted only to align table nicely

    Root Node AddressO

    Number of Records in Root NodeThis space inserted only to align table nicely

    Total Number of Records in B-treeL

    Checksum
    + Field Name + Description + - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    - -
    - -
    -
    - - - - - - - - - - - - - - - - - - - + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - - - - - - - - - - - -
    Field NameDescription

    Signature

    -

    The ASCII character string “BTHD” is - used to indicate the header of a version 2 B-link tree node. -

    -

    Version

    -

    The version number for this B-tree header. This document - describes version 0. -

    -

    Type

    -

    This field indicates the type of B-tree: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0A “testing” B-tree, this value should not be - used for storing records in actual HDF5 files. -
    1This B-tree is used for indexing indirectly accessed, - non-filtered ‘huge’ fractal heap objects. -
    2This B-tree is used for indexing indirectly accessed, - filtered ‘huge’ fractal heap objects. -
    3This B-tree is used for indexing directly accessed, - non-filtered ‘huge’ fractal heap objects. -
    4This B-tree is used for indexing directly accessed, - filtered ‘huge’ fractal heap objects. -
    5This B-tree is used for indexing the ‘name’ field for - links in indexed groups. -
    6This B-tree is used for indexing the ‘creation order’ - field for links in indexed groups. -
    7This B-tree is used for indexing shared object header - messages. -
    8This B-tree is used for indexing the ‘name’ field for - indexed attributes. -
    9This B-tree is used for indexing the ‘creation order’ - field for indexed attributes. -

    -

    The format of records for each type is described below.

    -

    Format Signature

    +

    This field is the same as described for versions 0 and 1 of + the superblock.

    +

    Node Size

    -

    This is the size in bytes of all B-tree nodes. -

    -

    Version Number of the Superblock

    +

    This field has a value of 2 and has the same meaning as for + versions 0 and 1.

    +

    Record Size

    -

    This field is the size in bytes of the B-tree record. -

    -

    Size of Offsets

    +

    This field is the same as described for versions 0 and 1 of + the superblock.

    +

    Depth

    -

    This is the depth of the B-tree. -

    -

    Size of Lengths

    +

    This field is the same as described for versions 0 and 1 of + the superblock.

    +

    Split Percent

    -

    The percent full that a node needs to increase above before it - is split. -

    -

    File Consistency Flags

    +

    This field is the same as described for versions 0 and 1 + except that it is smaller (the number of reserved bits has been + reduced from 30 to 6).

    +

    Merge Percent

    -

    The percent full that a node needs to decrease below before it - is merged. -

    -

    Base Address

    +

    This field is the same as described for versions 0 and 1 of + the superblock.

    +

    Root Node Address

    -

    This is the address of the root B-tree node. A B-tree with - no records will have the undefined - address in this field. -

    -

    Superblock Extension Address

    +

    + The field is the address of the object header for the superblock extension. If there is no + extension then this entry should be the undefined + address. +

    +

    Number of Records in Root Node

    -

    This is the number of records in the root node. -

    -

    Total Number of Records in B-tree

    -

    This is the total number of records in the entire B-tree. -

    -

    Checksum

    -

    This is the checksum for the B-tree header. -

    -
    -
    - -
    -
    -
    - - - - - - - + + - + + + - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Version 2 B-tree Internal Node -
    bytebytebytebyte

    End of File Address

    +

    This field is the same as described for versions 0 and 1 of + the superblock.

    +
    Signature

    Root Group Object Header Address

    +

    + This is the address of the root group + object header, which serves as the entry point into the group + graph for the file. +

    +
    VersionTypeRecords 0, 1, 2...N-1 (variable size)

    Superblock Checksum

    +

    The checksum for the superblock.

    +

    Child Node Pointer 0O


    Number of Records N0 for Child Node 0 (variable size)

    Total Number of Records for Child Node 0 (optional, variable size)

    Child Node Pointer 1O


    Number of Records N1 for Child Node 1 (variable size)

    Total Number of Records for Child Node 1 (optional, variable size)
    ...

    Child Node Pointer NO


    Number of Records Nn for Child Node N (variable size)

    Total Number of Records for Child Node N (optional, variable size)
    Checksum
    - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    -
    + + +
    +

    + II.B. Disk Format: Level 0B - File Driver + Info +

    + +

    + The driver information block is an optional region of the file + which contains information needed by the file driver to reopen a file. + The format is described below: +

    -
    -
    - - - - - - - - - +
    +
    Field NameDescription

    Signature

    -

    The ASCII character string “BTIN” is - used to indicate the internal node of a B-link tree. -

    -
    + - - + + + + - - + + - - + - - + - - - - - - - - - - - - + +
    Driver Information Block

    Version

    -

    The version number for this B-tree internal node. - This document describes version 0. -

    -
    bytebytebytebyte

    Type

    -

    This field is the type of the B-tree node. It should always - be the same as the B-tree type in the header. -

    -
    VersionReserved

    Records

    -

    The size of this field is determined by the number of records - for this node and the record size (from the header). The format - of records depends on the type of B-tree. -

    -
    Driver Information Size

    Child Node Pointer

    -

    This field is the address of the child node pointed to by the - internal node. -

    -

    Driver Identification (8 bytes)
    +

    Number of Records in Child Node

    -

    This is the number of records in the child node pointed to by - the corresponding Node Pointer. -

    -

    The number of bytes used to store this field is determined by - the maximum possible number of records able to be stored in the - child node. -

    -

    - The maximum number of records in a child node is computed - in the following way: - -

      -
    • Subtract the fixed size overhead for - the child node (for example, its signature, version, - checksum, and so on and one pointer triplet - of information for the child node (because there is one - more pointer triplet than records in each internal node)) - from the size of nodes for the B-tree.
    • -
    • Divide that result by the size of a record plus the - pointer triplet of information stored to reach each - child node from this node. -
    - -

    -

    - Note that leaf nodes do not encode any - child pointer triplets, so the maximum number of records in a - leaf node is just the node size minus the leaf node overhead, - divided by the record size. -

    -

    - Also note that the first level of internal nodes above the - leaf nodes do not encode the Total Number of Records in Child - Node value in the child pointer triplets (since it is the - same as the Number of Records in Child Node), so the - maximum number of records in these nodes is computed with the - equation above, but using (Child Pointer, Number of - Records in Child Node) pairs instead of triplets. -

    -

    - The number of - bytes used to encode this field is the least number of bytes - required to encode the maximum number of records in a child - node value for the child nodes below this level - in the B-tree. -

    -

    - For example, if the maximum number of child records is - 123, one byte will be used to encode these values in this - node; if the maximum number of child records is - 20000, two bytes will be used to encode these values in this - node; and so on. The maximum number of bytes used to - encode these values is 8 (in other words, an unsigned - 64-bit integer). -

    -

    Total Number of Records in Child Node

    -

    This is the total number of records for the node pointed to by - the corresponding Node Pointer and all its children. - This field exists only in nodes whose depth in the B-tree node - is greater than 1 (in other words, the “twig” - internal nodes, just above leaf nodes, do not store this - field in their child node pointers). -

    -

    The number of bytes used to store this field is determined by - the maximum possible number of records able to be stored in the - child node and its descendants. -

    -

    - The maximum possible number of records able to be stored in a - child node and its descendants is computed iteratively, in the - following way: The maximum number of records in a leaf node - is computed, then that value is used to compute the maximum - possible number of records in the first level of internal nodes - above the leaf nodes. Multiplying these two values together - determines the maximum possible number of records in child node - pointers for the level of nodes two levels above leaf nodes. - This process is continued up to any level in the B-tree. -

    -

    - The number of bytes used to encode this value is computed in - the same way as for the Number of Records in Child Node - field. -

    -

    Checksum

    -

    This is the checksum for this node. -

    -

    +
    Driver Information (variable size)
    +
    +
    +
    - - - -
    -
    -
    - - - +
    +
    +
    - Version 2 B-tree Leaf Node -
    - - - - + + - + + - - - - - - - - -
    bytebytebytebyteField NameDescription
    Signature

    Version

    +

    The version number of the Driver Information Block. This + document describes version 0.

    +
    VersionTypeRecord 0, 1, 2...N-1 (variable size)
    Checksum
    -
    -
    -
    - - - - + + + - - + + - - - + + + +
    Field NameDescription

    Driver Information Size

    +

    + The size in bytes of the Driver Information field. +

    +

    Signature

    -

    The ASCII character string “BTLF“ is - used to indicate the leaf node of a version 2 B-link tree. -

    -

    Driver Identification

    +

    + This is an eight-byte ASCII string without null termination which + identifies the driver and/or version number of the Driver + Information Block. The predefined driver encoded in this field by + the HDF5 Library is identified by the letters + NCSA + followed by the first four characters of the driver name. If the + Driver Information block is not the original version then the last + letter(s) of the identification will be replaced by a version + number in ASCII, starting with 0. +

    +

    Identification for user-defined drivers is also eight bytes + long. It can be arbitrary but should be unique to avoid the four + character prefix “NCSA”.

    +

    Version

    -

    The version number for this B-tree leaf node. - This document describes version 0. -

    -

    Driver Information

    Driver information is stored in a format defined by the file + driver (see description below).
    +
    + +
    The two drivers encoded in the +Driver Identification field are as follows: + +

    + The format of the Driver Information field for the above two + drivers are described below: +

    - -

    Type

    - -

    This field is the type of the B-tree node. It should always - be the same as the B-tree type in the header. -

    - - +
    + + - - + + + + - - + + + + -
    Multi Driver Information

    Records

    -

    The size of this field is determined by the number of records - for this node and the record size (from the header). The format - of records depends on the type of B-tree. -

    -
    bytebytebytebyte

    Checksum

    -

    This is the checksum for this node. -

    -
    Member MappingMember MappingMember MappingMember Mapping
    -
    - -
    -

    The record layout for each stored (in other words, non-testing) - B-tree type is as follows:

    - -
    - - - - - - - - - - - - - - - - - + + + + -
    - Version 2 B-tree, Type 1 Record Layout - Indirectly Accessed, Non-Filtered, - ‘Huge’ Fractal Heap Objects -
    bytebytebytebyte

    Huge Object AddressO


    Huge Object LengthL


    Huge Object IDL

    Member MappingMember MappingReservedReserved
    - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    - -
    - -
    -
    - - - - + - - + - - + - - + -
    Field NameDescription
    Address of Member File 1
    +

    Huge Object Address

    -

    The address of the huge object in the file. -

    -

    End of Address for Member File 1
    +

    Huge Object Length

    -

    The length of the huge object in the file. -

    -

    Address of Member File 2
    +

    Huge Object ID

    -

    The heap ID for the huge object. -

    -

    End of Address for Member File 2
    +
    -
    - -
    -
    -
    - - - - - - - + - + + - + + - + + - + + - + -
    - Version 2 B-tree, Type 2 Record Layout - Indirectly Accessed, Filtered, - ‘Huge’ Fractal Heap Objects -
    bytebytebytebyte
    ... ...
    +

    Filtered Huge Object AddressO


    Address of Member File N
    +

    Filtered Huge Object LengthL


    End of Address for Member File N
    +
    Filter Mask
    Name of Member File 1 (variable + size)
    +

    Filtered Huge Object Memory SizeL


    Name of Member File 2 (variable + size)
    +

    Huge Object IDL


    ... ...
    +
    - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    +
    Name of Member File N (variable + size)
    +
    + -
    + + -
    -
    - - - - +
    +
    +
    Field NameDescription
    + + + - - + + - - + + - - + + - - + + - - + + +
    Field NameDescription

    Filtered Huge Object Address

    -

    The address of the filtered huge object in the file. -

    -

    Member Mapping

    These fields are integer values from 1 to 6 + indicating how the data can be mapped to or merged with another + type of data.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Member MappingDescription
    1The superblock data.
    2The B-tree data.
    3The raw data.
    4The global heap data.
    5The local heap data.
    6The object header data.
    +

    +

    For example, if the third field has the value 3 and all the + rest have the value 1, it means there are two files: one for raw + data, and one for superblock, B-tree, global heap, local heap, and + object header.

    Filtered Huge Object Length

    -

    The length of the filtered huge object in the file. -

    -

    Reserved

    These fields are reserved and should always be zero.

    Filter Mask

    -

    A 32-bit bit field indicating which filters have been skipped for - this chunk. Each filter has an index number in the pipeline - (starting at 0, with the first filter to apply) and if that - filter is skipped, the bit corresponding to its index is set. -

    -

    Address of Member File N

    This field specifies the virtual address at which the + member file starts.

    +

    N is the number of member files.

    Filtered Huge Object Memory Size

    -

    The size of the de-filtered huge object in memory. -

    -

    End of Address for Member File N

    This field is the end of the allocated address for + the member file.

    Huge Object ID

    -

    The heap ID for the huge object. -

    -

    Name of Member File N

    + This field is the null-terminated name of the member file and its + length should be a multiple of 8 bytes. Additional bytes will be + padded with NULLs. The default naming convention is %s-X.h5, + where X is one of the letters s (for superblock), + b (for B-tree), r (for raw data), g (for + global heap), l (for local heap), and o (for + object header). The name of the whole HDF5 file will substitute the + %s in the string. +

    +
    - - - -
    -
    -
    - - - - - - - - - +
    +
    +
    - Version 2 B-tree, Type 3 Record Layout - Directly Accessed, Non-Filtered, - ‘Huge’ Fractal Heap Objects -
    bytebytebytebyte
    + - - - - + + + + -
    Family Driver Information

    Huge Object AddressO


    Huge Object LengthL

    bytebytebytebyte
    - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    +
    Size of Member File
    +
    + -
    + + -
    -
    - +
    +
    +
    - - + + - - + + +
    Field NameDescriptionField NameDescription

    Huge Object Address

    -

    The address of the huge object in the file. -

    -

    Size of Member File

    This field is the size of the member file in the + family of files.

    +
    - -

    Huge Object Length

    - -

    The length of the huge object in the file. -

    - - +
    +

    + II.C. Disk Format: Level 0C - Superblock + Extension +

    + +

    + The superblock extension is used to store superblock metadata + which is either optional, or added after the version of the superblock + was defined. Superblock extensions may only exist when version 2+ of + superblock is used. A superblock extension is an object header which + may hold the following messages: +

    + - - -
    -
    -
    - - - - - - - - +
    +
    +
    +

    + III. Disk Format: Level 1 - File + Infrastructure +

    - - - - - - - - - - - - -
    - Version 2 B-tree, Type 4 Record Layout - Directly Accessed, Filtered, - ‘Huge’ Fractal Heap Objects -
    bytebytebytebyte

    Filtered Huge Object AddressO


    Filtered Huge Object LengthL

    Filter Mask

    Filtered Huge Object Memory SizeL

    +
    +

    + III.A. Disk Format: Level 1A - B-trees and B-tree + Nodes +

    + +

    B-trees allow flexible storage for objects which tend to grow in + ways that cause the object to be stored discontiguously. B-trees are + described in various algorithms books including “Introduction to + Algorithms” by Thomas H. Cormen, Charles E. Leiserson, and Ronald + L. Rivest. B-trees are used in several places in the HDF5 file format, + when an index is needed for another data structure.

    + +

    The version 1 B-tree structure described below is the original + index structure, but is limited by some bugs in our implementation + (mainly in how it handles deleting records). The version 1 B-trees are + being phased out in favor of the version 2 B-trees described below, + although both types of structures may be found in the same file, + depending on application settings when creating the file.

    - - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    +
    +

    + III.A.1. Disk Format: Level 1A1 - Version 1 + B-trees (B-link Trees) +

    + +

    + Version 1 B-trees in HDF5 files are an implementation of the B-link tree, + in which the sibling nodes at a particular level in the tree are stored + in a doubly-linked list, as described in the “Efficient Locking + for Concurrent Operations on B-trees” paper by Phillip Lehman and + S. Bing Yao as published in the ACM Transactions on + Database Systems, Vol. 6, No. 4, December 1981. +

    -
    +

    The B-link trees implemented by the file format contain one more + key than the number of children. In other words, each child pointer out + of a B-tree node has a left key and a right key. The pointers out of + internal nodes point to sub-trees while the pointers out of leaf nodes + point to symbol nodes and raw data chunks. Aside from that difference, + internal nodes and leaf nodes are identical.

    -
    -
    - - - - - +
    +
    Field NameDescription
    + - - + + + + - - + - - + + + - - + -
    B-link Tree Nodes

    Filtered Huge Object Address

    -

    The address of the filtered huge object in the file. -

    -
    bytebytebytebyte

    Filtered Huge Object Length

    -

    The length of the filtered huge object in the file. -

    -
    Signature

    Filter Mask

    -

    A 32-bit bit field indicating which filters have been skipped for - this chunk. Each filter has an index number in the pipeline - (starting at 0, with the first filter to apply) and if that - filter is skipped, the bit corresponding to its index is set. -

    -
    Node TypeNode LevelEntries Used

    Filtered Huge Object Memory Size

    -

    The size of the de-filtered huge object in memory. -

    -

    Address of Left SiblingO
    +
    -
    - -
    -
    -
    - - - - - - - - - - - + - - - - - - -
    - Version 2 B-tree, Type 5 Record Layout - Link Name for Indexed Group -
    bytebytebytebyte
    Hash of Name
    Address of Right SiblingO
    +
    ID (bytes 1-4)
    ID (bytes 5-7)
    -
    - -
    -
    - - - - + - - + - - + -
    Field NameDescriptionKey 0 (variable size)

    Hash

    -

    This field is hash value of the name for the link. The hash - value is the Jenkins’ lookup3 checksum algorithm applied to - the link’s name. -

    -

    Address of Child 0O
    +

    ID

    -

    This is a 7-byte sequence of bytes and is the heap ID for the - link record in the group’s fractal heap.

    -
    Key 1 (variable size)
    -
    - -
    -
    -
    - - - - - - - + - - - - + + - + -
    - Version 2 B-tree, Type 6 Record Layout - Creation Order for Indexed Group -
    bytebytebytebyte
    Address of Child 1O
    +

    Creation Order (8 bytes)

    ID (bytes 1-4)...
    ID (bytes 5-7)Key 2K (variable size) +
    -
    -
    -
    - - - + - - + +
    Field NameDescription
    Address of Child 2KO
    +

    Creation Order

    -

    This field is the creation order value for the link. -

    -
    Key 2K+1 (variable size) +
    + - - + + +

    ID

    -

    This is a 7-byte sequence of bytes and is the heap ID for the - link record in the group’s fractal heap.

    -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    - -
    - -
    -
    -
    - - + +
    +
    +
    - Version 2 B-tree, Type 7 Record Layout - Shared Object Header Messages (Sub-Type 0 - Message in Heap) -
    - - - - + + - - - - - - - - - - - + + -
    bytebytebytebyteField NameDescription
    Message LocationThis space inserted only to align table nicely
    Hash
    Reference Count

    Heap ID (8 bytes)

    Signature

    +

    + The ASCII character string “ + TREE + ” is used to indicate the beginning of a B-link tree node. + This gives file consistency checking utilities a better chance of + reconstructing a damaged file. +

    +
    -
    -
    -
    - - - + + - - + + - - - + + + - - - + + + - - - + + + -
    Field NameDescription

    Node Type

    +

    + Each B-link tree points to a particular type of data. This field + indicates the type of data as well as implying the maximum degree K + of the tree and the size of each Key field. + + +

    + + + + + + + + + + + + + +
    Node TypeDescription
    0This tree points to group nodes.
    1This tree points to raw data chunk nodes.
    +

    +

    Message Location

    -

    This field Indicates the location where the message is stored: - - - - - - - - - - - - - -
    ValueDescription
    0Shared message is stored in shared message index heap. -
    1Shared message is stored in object header. -

    -

    Node Level

    +

    The node level indicates the level at which this node appears + in the tree (leaf nodes are at level zero). Not only does the level + indicate whether child pointers point to sub-trees or to data, but + it can also be used to help file consistency checking utilities + reconstruct damaged trees.

    +

    Hash

    -

    This field is hash value of the shared message. The hash - value is the Jenkins’ lookup3 checksum algorithm applied to - the shared message.

    -

    Entries Used

    +

    This determines the number of children to which this node + points. All nodes of a particular type of tree have the same + maximum degree, but most nodes will point to less than that number + of children. The valid child pointers and keys appear at the + beginning of the node and the unused pointers and keys appear at + the end of the node. The unused pointers and keys have undefined + values.

    +

    Reference Count

    -

    The number of objects which reference this message.

    -

    Address of Left Sibling

    +

    + This is the relative file address of the left sibling of the + current node. If the current node is the left-most node at this + level then this field is the undefined + address. +

    +

    Heap ID

    -

    This is an 8-byte sequence of bytes and is the heap ID for the - shared message in the shared message index’s fractal heap.

    -

    Address of Right Sibling

    +

    + This is the relative file address of the right sibling of the + current node. If the current node is the right-most node at this + level then this field is the undefined + address. +

    +
    -
    + +

    Keys and Child Pointers

    + +

    + Each tree has 2K+1 keys with 2K child pointers + interleaved between the keys. The number of keys and child pointers + actually containing valid values is determined by the node’s + Entries Used field. If that field is N then the + B-link tree contains N child pointers and N+1 + keys. +

    + + -
    -
    -
    - - + + + - - - - - +

    The format of the key depends on the node type. For nodes of + node type 0 (group nodes), the key is formatted as follows:

    +
    - Version 2 B-tree, Type 7 Record Layout - Shared Object Header Messages (Sub-Type 1 - Message in Object Header) -

    Key

    +

    + The format and size of the key values is determined by the type of + data to which this tree points. The keys are ordered and are + boundaries for the contents of the child pointer; that is, the key + values represented by child N fall between Key N + and Key N+1. Whether the interval is open or closed on + each end is determined by the type of data to which the tree + points. +

    -
    bytebytebytebyte
    + + + + +
    A single field of Size of Lengths + bytes: + Indicates the byte offset into the local heap + for the first object name in the subtree which that key + describes.
    +

    - - Message Location - This space inserted only to align table nicely - - - Hash + +

    For nodes of node type 1 (chunked raw data nodes), the key is + formatted as follows:

    + + + + + + + + + + + + + +
    Bytes 1-4:Size of chunk in bytes.
    Bytes 5-8:Filter mask, a 32-bit bit field indicating which filters + have been skipped for this chunk. Each filter has an index number + in the pipeline (starting at 0, with the first filter to apply) + and if that filter is skipped, the bit corresponding to its index + is set.
    (D + 1) 64-bit fields: + The offset of the chunk within the dataset where D + is the number of dimensions of the dataset, and the last value is + the offset within the dataset’s datatype and should always + be zero. For example, if a chunk in a 3-dimensional dataset + begins at the position [5,5,5], there will be three + such 64-bit values, each with the value of 5, + followed by a 0 value. +
    +

    + + - - Reserved (zero) - Message Type - Object Header Index + + +

    Child Pointer

    + +

    The tree node contains file addresses of subtrees or data + depending on the node level. Nodes at Level 0 point to data + addresses, either raw data chunks or group nodes. Nodes at non-zero + levels point to other nodes of the same B-tree.

    +

    + For raw data chunk nodes, the child pointer is the address of a + single raw data chunk. For group nodes, the child pointer points to + a symbol table, which contains + information for multiple symbol table entries. +

    + - -
    Object Header AddressO

    + +
    + +

    Conceptually, each B-tree node looks like this:

    +
    + + + + + + + + + + + + + + + + + + + + + -
    key[0] child[0] key[1] child[1] key[2] ... ... key[N-1] +  child[N-1] +  key[N] +
    + +
    +
    where child[ +i] is a pointer to a sub-tree (at a level above Level 0) or to +data (at Level 0). Each key[ +i] describes an +item stored by the B-tree (a chunk or an object of a group node). +The range of values represented by child[ +i] is indicated by key[ +i] and key[ +i+1]. + + +

    + The following question must next be answered: “Is the value + described by key[i] contained in child[i-1] or in child[i]?” + The answer depends on the type of tree. In trees for groups (node type + 0) the object described by key[i] is the greatest object + contained in child[i-1] while in chunk trees (node type 1) the + chunk described by key[i] is the least chunk in child[i]. +

    - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    +

    That means that key[0] for group trees is sometimes unused; it + points to offset zero in the heap, which is always the empty string and + compares as “less-than” any valid object name.

    - +

    + And key[N] for chunk trees is sometimes unused; it contains a + chunk offset which compares as “greater-than” any other + chunk offset and has a chunk byte size of zero to indicate that it is + not actually allocated. +

    -
    -
    - - - - - +
    +

    + III.A.2. Disk Format: Level 1A2 - Version 2 + B-trees +

    + +

    + Version 2 B-trees are “traditional” B-trees, with one major + difference. Instead of just using a simple pointer (or address in the + file) to a child of an internal node, the pointer to the child node + contains two additional pieces of information: the number of records in + the child node itself, and the total number of records in the child + node and all its descendants. Storing this additional information + allows fast array-like indexing to locate the nth record in + the B-tree. +

    - - - - +

    + The entry into a version 2 B-tree is a header which contains global + information about the structure of the B-tree. The root node + address field in the header points to the B-tree root node, which is + either an internal or leaf node, depending on the value in the + header’s depth field. An internal node consists of + records plus pointers to further leaf or internal nodes in the tree. A + leaf node consists of solely of records. The format of the records + depends on the B-tree type (stored in the header). +

    - - - - +
    +
    Field NameDescription

    Message Location

    -

    This field Indicates the location where the message is stored: - - - - - - - - - - - - - -
    ValueDescription
    0Shared message is stored in shared message index heap. -
    1Shared message is stored in object header. -

    -

    Hash

    -

    This field is hash value of the shared message. The hash - value is the Jenkins’ lookup3 checksum algorithm applied to - the shared message.

    -
    + - - - + + + + - - - - - - + - -
    Version 2 B-tree Header

    Message Type

    -

    The object header message type of the shared message.

    -
    bytebytebytebyte

    Object Header Index

    -

    This field indicates that the shared message is the nth message - of its type in the specified object header.

    -

    Object Header Address

    -

    The address of the object header containing the shared message.

    -
    Signature
    -
    - -
    -
    -
    - - - - - - - + + + - - + - - + + - + + + - + -
    - Version 2 B-tree, Type 8 Record Layout - Attribute Name for Indexed Attributes -
    bytebytebytebyteVersionTypeThis space inserted + only to align table nicely

    Heap ID (8 bytes)

    Node Size
    Message FlagsThis space inserted only to align table nicelyRecord SizeDepth
    Creation OrderSplit PercentMerge PercentThis space inserted + only to align table nicely
    Hash of Name
    Root Node AddressO
    +
    -
    - -
    -
    - - - + + - - - + - - - + +
    Field NameDescriptionNumber of Records in Root NodeThis space inserted + only to align table nicely

    Heap ID

    -

    This is an 8-byte sequence of bytes and is the heap ID for the - attribute in the object’s attribute fractal heap.

    -

    Total Number of Records in B-treeL
    +

    Message Flags

    The object header message flags for the attribute message.

    -
    Checksum
    + - - + + - - - + + +

    Creation Order

    -

    This field is the creation order value for the attribute. -

    -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)

    Hash

    -

    This field is hash value of the name for the attribute. The hash - value is the Jenkins’ lookup3 checksum algorithm applied to - the attribute’s name. -

    -
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    - -
    - -
    -
    -
    - - + +
    +
    +
    - Version 2 B-tree, Type 9 Record Layout- Creation Order for Indexed Attributes -
    - - - - + + - - - - - + + + - + + -
    bytebytebytebyteField NameDescription

    Heap ID (8 bytes)

    Message FlagsThis space inserted only to align table nicely

    Signature

    +

    + The ASCII character string “ + BTHD + ” is used to indicate the header of a version 2 B-link tree + node. +

    +
    Creation Order

    Version

    +

    The version number for this B-tree header. This document + describes version 0.

    +
    -
    -
    -
    - - - + + - - - + + + - - - + + + - - - + + + -
    Field NameDescription

    Type

    +

    This field indicates the type of B-tree:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0A “testing” B-tree, this value should not + be used for storing records in actual HDF5 files. +
    1This B-tree is used for indexing indirectly accessed, + non-filtered ‘huge’ fractal heap objects.
    2This B-tree is used for indexing indirectly accessed, + filtered ‘huge’ fractal heap objects.
    3This B-tree is used for indexing directly accessed, + non-filtered ‘huge’ fractal heap objects.
    4This B-tree is used for indexing directly accessed, + filtered ‘huge’ fractal heap objects.
    5This B-tree is used for indexing the ‘name’ + field for links in indexed groups.
    6This B-tree is used for indexing the ‘creation + order’ field for links in indexed groups.
    7This B-tree is used for indexing shared object header + messages.
    8This B-tree is used for indexing the ‘name’ + field for indexed attributes.
    9This B-tree is used for indexing the ‘creation + order’ field for indexed attributes.
    +

    +

    The format of records for each type is described below.

    +

    Heap ID

    -

    This is an 8-byte sequence of bytes and is the heap ID for the - attribute in the object’s attribute fractal heap.

    -

    Node Size

    +

    This is the size in bytes of all B-tree nodes.

    +

    Message Flags

    -

    The object header message flags for the attribute message.

    -

    Record Size

    +

    This field is the size in bytes of the B-tree record.

    +

    Creation Order

    -

    This field is the creation order value for the attribute. -

    -

    Depth

    +

    This is the depth of the B-tree.

    +
    -
    + +

    Split Percent

    + +

    The percent full that a node needs to increase above before + it is split.

    + + + +

    Merge Percent

    + +

    The percent full that a node needs to decrease below + before it is merged.

    + + -
    -

    -III.B. Disk Format: Level 1B - Group Symbol Table Nodes

    + +

    Root Node Address

    + +

    + This is the address of the root B-tree node. A B-tree with no + records will have the undefined + address in this field. +

    + + -

    A group is an object internal to the file that allows - arbitrary nesting of objects within the file (including other groups). - A group maps a set of link names in the group to a set of relative - file addresses of objects in the file. Certain metadata for an object to - which the group points can be cached in the group’s symbol table entry in - addition to being in the object’s header.

    + +

    Number of Records in Root Node

    + +

    This is the number of records in the root node.

    + + -

    An HDF5 object name space can be stored hierarchically by - partitioning the name into components and storing each - component as a link in a group. The link for a - non-ultimate component points to the group containing - the next component. The link for the last - component points to the object being named.

    + +

    Total Number of Records in B-tree

    + +

    This is the total number of records in the entire B-tree.

    + + -

    One implementation of a group is a collection of symbol table nodes - indexed by a B-link tree. Each symbol table node contains entries - for one or more links. If an attempt is made to add a link to an already - full symbol table node containing 2K entries, then the node is - split and one node contains K symbols and the other contains - K+1 symbols.

    + +

    Checksum

    + +

    This is the checksum for the B-tree header.

    + + + + -
    - - +
    +
    +
    +
    - Symbol Table Node (A Leaf of a B-link tree) -
    + - - - - + + + + - + - - - - + + + - - + -
    Version 2 B-tree Internal Node
    bytebytebytebytebytebytebytebyte
    SignatureSignature
    Version NumberReserved (zero)Number of SymbolsVersionTypeRecords 0, 1, 2...N-1 (variable size)


    Group Entries



    Child Node Pointer 0O
    +
    -
    - -
    -
    - - - + - - - + - - - + - + + - - + - - - + -
    Field NameDescription
    Number of Records N0 for Child + Node 0 (variable size)

    Signature

    -

    The ASCII character string “SNOD” is - used to indicate the - beginning of a symbol table node. This gives file - consistency checking utilities a better chance of - reconstructing a damaged file. -

    -

    Total Number of Records for Child Node 0 + (optional, variable size)

    Version Number

    -

    The version number for the symbol table node. This - document describes version 1. (There is no version ‘0’ - of the symbol table node) -

    -

    Child Node Pointer 1O
    +

    Number of Records N1 for Child + Node 1 (variable size)

    Number of Entries

    -

    Although all symbol table nodes have the same length, - most contain fewer than the maximum possible number of - link entries. This field indicates how many entries - contain valid data. The valid entries are packed at the - beginning of the symbol table node while the remaining - entries contain undefined values. -

    -

    Total Number of Records for Child Node 1 + (optional, variable size)

    Symbol Table Entries

    -

    Each link has an entry in the symbol table node. - The format of the entry is described below. - There are 2K entries in each group node, where - K is the “Group Leaf Node K” value from the - superblock. -

    -
    ...
    -
    - -
    -

    -III.C. Disk Format: Level 1C - Symbol Table Entry

    - -

    Each symbol table entry in a symbol table node is designed - to allow for very fast browsing of stored objects. - Toward that design goal, the symbol table entries - include space for caching certain constant metadata from the - object header.

    - -
    - - - - - - - + - - + - - + - - + +
    - Symbol Table Entry -
    bytebytebytebyte
    Child Node Pointer NO
    +

    Link Name OffsetO


    Number of Records Nn for Child + Node N (variable size)

    Object Header AddressO


    Total Number of Records for Child Node N + (optional, variable size)
    Cache TypeChecksum
    + - + + +
    Reserved (zero) (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    +
    + +
    +
    + - + + -


    Scratch-pad Space (16 bytes)


    Field NameDescription
    - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + -
    Field NameDescription

    Link Name Offset

    -

    This is the byte offset into the group’s local - heap for the name of the link. The name is null - terminated. -

    -

    Object Header Address

    -

    Every object has an object header which serves as a - permanent location for the object’s metadata. In addition - to appearing in the object header, some of the object’s metadata - can be cached in the scratch-pad space. -

    -

    Cache Type

    -

    The cache type is determined from the object header. - It also determines the format for the scratch-pad space: - - - - - - - - - - - - - - - - - - -
    TypeDescription
    0No data is cached by the group entry. This - is guaranteed to be the case when an object header - has a link count greater than one. -
    1Group object header metadata is cached in the - scratch-pad space. This implies that the symbol table - entry refers to another group. -
    2The entry is a symbolic link. The first four bytes - of the scratch-pad space are the offset into the local - heap for the link value. The object header address - will be undefined. -

    - -

    Reserved

    -

    These four bytes are present so that the scratch-pad - space is aligned on an eight-byte boundary. They are - always set to zero. -

    -

    Scratch-pad Space

    -

    This space is used for different purposes, depending - on the value of the Cache Type field. Any metadata - about an object represented in the scratch-pad - space is duplicated in the object header for that - object. -

    -

    - Furthermore, no data is cached in the group - entry scratch-pad space if the object header for - the object has a link count greater than one. -

    -

    Signature

    +

    + The ASCII character string “ + BTIN + ” is used to indicate the internal node of a B-link tree. +

    +
    -
    - -
    -

    Format of the Scratch-pad Space

    - -

    The symbol table entry scratch-pad space is formatted - according to the value in the Cache Type field.

    - -

    If the Cache Type field contains the value zero - (0) then no information is - stored in the scratch-pad space.

    - -

    If the Cache Type field contains the value one - (1), then the scratch-pad space - contains cached metadata for another object header - in the following format:

    - -
    - - - - - - + + - + + - + + -
    - Object Header Scratch-pad Format -
    bytebytebytebyte

    Version

    +

    The version number for this B-tree internal node. This + document describes version 0.

    +

    Address of B-treeO

    Type

    +

    This field is the type of the B-tree node. It should always + be the same as the B-tree type in the header.

    +

    Address of Name HeapO

    Records

    +

    The size of this field is determined by the number of records + for this node and the record size (from the header). The format of + records depends on the type of B-tree.

    +
    - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    +

    Child Node Pointer

    + +

    This field is the address of the child node pointed to by the + internal node.

    + + -
    -
    - - - + + - - + + - - + + -
    Field NameDescription

    Number of Records in Child Node

    +

    + This is the number of records in the child node pointed to by the + corresponding Node Pointer. +

    +

    The number of bytes used to store this field is determined by + the maximum possible number of records able to be stored in the + child node.

    +

    The maximum number of records in a child node is computed in + the following way:

    +
      +
    • Subtract the fixed size overhead for the child node (for + example, its signature, version, checksum, and so on and one + pointer triplet of information for the child node (because there + is one more pointer triplet than records in each internal node)) + from the size of nodes for the B-tree. +
    • +
    • Divide that result by the size of a record plus the + pointer triplet of information stored to reach each child node + from this node.
    • +
    + +

    +

    Note that leaf nodes do not encode any child pointer + triplets, so the maximum number of records in a leaf node is just + the node size minus the leaf node overhead, divided by the record + size.

    +

    + Also note that the first level of internal nodes above the leaf + nodes do not encode the Total Number of Records in Child + Node value in the child pointer triplets (since it is the same as + the Number of Records in Child Node), so the maximum + number of records in these nodes is computed with the equation + above, but using (Child Pointer, Number of + Records in Child Node) pairs instead of triplets. +

    +

    The number of bytes used to encode this field is the least + number of bytes required to encode the maximum number of records in + a child node value for the child nodes below this level in the + B-tree.

    +

    For example, if the maximum number of child records is 123, + one byte will be used to encode these values in this node; if the + maximum number of child records is 20000, two bytes will be used to + encode these values in this node; and so on. The maximum number of + bytes used to encode these values is 8 (in other words, an unsigned + 64-bit integer).

    +

    Address of B-tree

    -

    This is the file address for the root of the - group’s B-tree. -

    -

    Total Number of Records in Child Node

    +

    + This is the total number of records for the node pointed to by the + corresponding Node Pointer and all its children. This + field exists only in nodes whose depth in the B-tree node is + greater than 1 (in other words, the “twig” internal + nodes, just above leaf nodes, do not store this field in their + child node pointers). +

    +

    The number of bytes used to store this field is determined by + the maximum possible number of records able to be stored in the + child node and its descendants.

    +

    The maximum possible number of records able to be stored in a + child node and its descendants is computed iteratively, in the + following way: The maximum number of records in a leaf node is + computed, then that value is used to compute the maximum possible + number of records in the first level of internal nodes above the + leaf nodes. Multiplying these two values together determines the + maximum possible number of records in child node pointers for the + level of nodes two levels above leaf nodes. This process is + continued up to any level in the B-tree.

    +

    + The number of bytes used to encode this value is computed in the + same way as for the Number of Records in Child Node field. +

    +

    Address of Name Heap

    -

    This is the file address for the group’s local - heap, in which are stored the group’s symbol names. -

    -

    Checksum

    +

    This is the checksum for this node.

    +
    -
    + + -
    -

    If the Cache Type field contains the value two - (2), then the scratch-pad space - contains cached metadata for a symbolic link - in the following format:

    - -
    - - +
    +
    +
    +
    - Symbolic Link Scratch-pad Format -
    + - - - - + + + + - + -
    Version 2 B-tree Leaf Node
    bytebytebytebytebytebytebytebyte
    Offset to Link ValueSignature
    -
    - -
    -
    - - - + + + - - - + -
    Field NameDescriptionVersionTypeRecord 0, 1, 2...N-1 (variable size)

    Offset to Link Value

    -

    The value of a symbolic link (that is, the name of the - thing to which it points) is stored in the local heap. - This field is the 4-byte offset into the local heap for - the start of the link value, which is null terminated. -

    -
    Checksum
    -
    + +
    -

    -III.D. Disk Format: Level 1D - Local Heaps

    - -

    A local heap is a collection of small pieces of data that are particular - to a single object in the HDF5 file. Objects can be - inserted and removed from the heap at any time. - The address of a heap does not change once the heap is created. - For example, a group stores addresses of objects in symbol table nodes - with the names of links stored in the group’s local heap. -

    - -
    - - - +
    +
    - Local Heap -
    - - - - + + - + + - - + + - + + - + + - + + -
    bytebytebytebyteField NameDescription
    Signature

    Signature

    +

    + The ASCII character string “ + BTLF + ” is used to indicate the leaf node of a version 2 B-link + tree. +

    +
    VersionReserved (zero)

    Version

    +

    The version number for this B-tree leaf node. This document + describes version 0.

    +

    Data Segment SizeL

    Type

    +

    This field is the type of the B-tree node. It should always + be the same as the B-tree type in the header.

    +

    Offset to Head of Free-listL

    Records

    +

    The size of this field is determined by the number of records + for this node and the record size (from the header). The format of + records depends on the type of B-tree.

    +

    Address of Data SegmentO

    Checksum

    +

    This is the checksum for this node.

    +
    - - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    + +
    + +
    +

    The record layout for each stored (in other words, non-testing) + B-tree type is as follows:

    - +
    + + -
    -
    -
    Version 2 B-tree, Type 1 Record Layout - Indirectly + Accessed, Non-Filtered, ‘Huge’ Fractal Heap Objects
    - - + + + + - - + - - - + - - - + +
    Field NameDescriptionbytebytebytebyte

    Signature

    -

    The ASCII character string “HEAP” - is used to indicate the - beginning of a heap. This gives file consistency - checking utilities a better chance of reconstructing a - damaged file. -

    -

    Huge Object AddressO
    +

    Version

    -

    Each local heap has its own version number so that new - heaps can be added to old files. This document - describes version zero (0) of the local heap. -

    -

    Huge Object LengthL
    +

    Data Segment Size

    -

    The total amount of disk memory allocated for the heap - data. This may be larger than the amount of space - required by the objects stored in the heap. The extra - unused space in the heap holds a linked list of free blocks. -

    -

    Huge Object IDL
    +
    + - - + + - - - + + -

    Offset to Head of Free-list

    -

    This is the offset within the heap data segment of the - first free block (or the - undefined address if there is no - free block). The free block contains “Size of Lengths” bytes that - are the offset of the next free block (or the - value ‘1’ if this is the - last free block) followed by “Size of Lengths” bytes that store - the size of this free block. The size of the free block includes - the space used to store the offset of the next free block and - the size of the current block, making the minimum size of a free - block 2 * “Size of Lengths”. -

    -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)

    Address of Data Segment

    -

    The data segment originally starts immediately after - the heap header, but if the data segment must grow as a - result of adding more objects, then the data segment may - be relocated, in its entirety, to another part of the - file. -

    -
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    -
    - -

    Objects within a local heap should be aligned on an 8-byte boundary.

    - -
    -

    -III.E. Disk Format: Level 1E - Global Heap

    - -

    Each HDF5 file has a global heap which stores various types of - information which is typically shared between datasets. The - global heap was designed to satisfy these goals:

    - -
      -
    1. Repeated access to a heap object must be efficient without - resulting in repeated file I/O requests. Since global heap - objects will typically be shared among several datasets, it is - probable that the object will be accessed repeatedly.
    2. -
    3. Collections of related global heap objects should result in - fewer and larger I/O requests. For instance, a dataset of - object references will have a global heap object for each - reference. Reading the entire set of object references - should result in a few large I/O requests instead of one small - I/O request for each reference.
    4. -
    5. It should be possible to remove objects from the global heap - and the resulting file hole should be eligible to be reclaimed - for other uses.
    6. -
    - - -

    The implementation of the heap makes use of the memory management - already available at the file level and combines that with a new - object called a collection to achieve goal B. The global heap - is the set of all collections. Each global heap object belongs to - exactly one collection and each collection contains one or more global - heap objects. For the purposes of disk I/O and caching, a collection is - treated as an atomic object, addressing goal A. -

    - -

    When a global heap object is deleted from a collection (which occurs - when its reference count falls to zero), objects located after the - deleted object in the collection are packed down toward the beginning - of the collection and the collection’s global heap object 0 is created - (if possible) or its size is increased to account for the recently - freed space. There are no gaps between objects in each collection, - with the possible exception of the final space in the collection, if - it is not large enough to hold the header for the collection’s global - heap object 0. These features address goal C. -

    - -

    The HDF5 Library creates global heap collections as needed, so there may - be multiple collections throughout the file. The set of all of them is - abstractly called the “global heap”, although they do not actually link - to each other, and there is no global place in the file where you can - discover all of the collections. The collections are found simply by - finding a reference to one through another object in the file. For - example, data of variable-length datatype elements is stored in the - global heap and is accessed via a global heap ID. The format for - global heap IDs is described at the end of this section. -

    - -
    - - +
    - A Global Heap Collection -
    +
    + +
    +
    + - - - - + + - + + - - + + - + + - - - +
    bytebytebytebyteField NameDescription
    Signature

    Huge Object Address

    +

    The address of the huge object in the file.

    +
    VersionReserved (zero)

    Huge Object Length

    +

    The length of the huge object in the file.

    +

    Collection SizeL

    Huge Object ID

    +

    The heap ID for the huge object.

    +

    Global Heap Object 1

    +
    - -
    Global Heap Object 2

    - +
    +
    +
    + + - + + + + - + - - + -
    Version 2 B-tree, Type 2 Record Layout - Indirectly + Accessed, Filtered, ‘Huge’ Fractal Heap Objects

    ...

    bytebytebytebyte

    Global Heap Object N


    Filtered Huge Object AddressO
    +

    Global Heap Object 0 (free space)


    Filtered Huge Object LengthL
    +
    - - - - - -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    - -
    - -
    -
    - - - + - - - + - - - + +
    Field NameDescriptionFilter Mask

    Signature

    -

    The ASCII character string “GCOL” - is used to indicate the - beginning of a collection. This gives file consistency - checking utilities a better chance of reconstructing a - damaged file. -

    -

    Filtered Huge Object Memory SizeL
    +

    Version

    -

    Each collection has its own version number so that new - collections can be added to old files. This document - describes version one (1) of the collections (there is no - version zero (0)). -

    -

    Huge Object IDL
    +
    + - - + + - - - + + +

    Collection Size

    -

    This is the size in bytes of the entire collection - including this field. The default (and minimum) - collection size is 4096 bytes which is a typical file - system block size. This allows for 127 16-byte heap - objects plus their overhead (the collection header of 16 bytes - and the 16 bytes of information about each heap object). -

    -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)

    Global Heap Object 1 through N

    -

    The objects are stored in any order with no - intervening unused space. -

    -
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    + +
    +
    +
    + - - + + -

    Global Heap Object 0

    -

    Global Heap Object 0 (zero), when present, represents the free - space in the collection. Free space always appears at the end of - the collection. If the free space is too small to store the header - for Object 0 (described below) then the header is implied and the - collection contains no free space. -

    -
    Field NameDescription
    -
    - -
    -
    -
    - - - - - - + + - - + + - + + - + + - + + -
    - Global Heap Object -
    bytebytebytebyte

    Filtered Huge Object Address

    +

    The address of the filtered huge object in the file.

    +
    Heap Object IndexReference Count

    Filtered Huge Object Length

    +

    The length of the filtered huge object in the file.

    +
    Reserved (zero)

    Filter Mask

    +

    A 32-bit bit field indicating which filters have been skipped + for this chunk. Each filter has an index number in the pipeline + (starting at 0, with the first filter to apply) and if that filter + is skipped, the bit corresponding to its index is set.

    +

    Object SizeL

    Filtered Huge Object Memory Size

    +

    The size of the de-filtered huge object in memory.

    +

    Object Data

    Huge Object ID

    +

    The heap ID for the huge object.

    +
    - - - - -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    + +
    - +
    +
    +
    + + -
    -
    -
    Version 2 B-tree, Type 3 Record Layout - Directly + Accessed, Non-Filtered, ‘Huge’ Fractal Heap Objects
    - - + + + + - - + - - - + +
    Field NameDescriptionbytebytebytebyte

    Heap Object Index

    -

    Each object has a unique identification number within a - collection. The identification numbers are chosen so that - new objects have the smallest value possible with the - exception that the identifier 0 always refers to the - object which represents all free space within the - collection. -

    -

    Huge Object AddressO
    +

    Reference Count

    -

    All heap objects have a reference count field. An - object which is referenced from some other part of the - file will have a positive reference count. The reference - count for Object 0 is always zero. -

    -

    Huge Object LengthL
    +
    + + + + + - - + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)

    Reserved

    -

    Zero padding to align next field on an 8-byte boundary. -

    -
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    +
    + +
    +
    + - - + + - - + + -

    Object Size

    -

    This is the size of the object data stored for the object. - The actual storage space allocated for the object data is rounded - up to a multiple of eight. -

    -
    Field NameDescription

    Object Data

    -

    The object data is treated as a one-dimensional array - of bytes to be interpreted by the caller. -

    -

    Huge Object Address

    +

    The address of the huge object in the file.

    +
    -
    + +

    Huge Object Length

    + +

    The length of the huge object in the file.

    + + -
    -

    - The format for the ID used to locate an object in the global heap is - described here:

    + + -
    - - +
    +
    +
    +
    - Global Heap ID -
    + - - - - + + + + - + - - + -
    Version 2 B-tree, Type 4 Record Layout - Directly + Accessed, Filtered, ‘Huge’ Fractal Heap Objects
    bytebytebytebytebytebytebytebyte

    Collection AddressO


    Filtered Huge Object AddressO
    +
    Object Index
    Filtered Huge Object LengthL
    +
    - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    - -
    -
    - + + - - + +
    Filter Mask
    Field NameDescription
    Filtered Huge Object Memory SizeL
    +
    + - - + + - - - + + +

    Collection Address

    -

    This field is the address of the global heap collection - where the data object is stored. -

    -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)

    ID

    -

    This field is the index of the data object within the - global heap collection. -

    -
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    - -
    - - -
    -

    -III.F. Disk Format: Level 1F - Fractal Heap

    - -

    - Each fractal heap consists of a header and zero or more direct and - indirect blocks (described below). The header contains general - information as well as - initialization parameters for the doubling table. The Root - Block Address in the header points to the first direct or - indirect block in the heap. -

    - -

    - Fractal heaps are based on a data structure called a doubling - table. A doubling table provides a mechanism for quickly - extending an array-like data structure that minimizes the number of - empty blocks in the heap, while retaining very fast lookup of any - element within the array. More information on fractal heaps and - doubling tables can be found in the RFC - “Private - Heaps in HDF5.” -

    - -

    - The fractal heap implements the doubling table structure with - indirect and direct blocks. - Indirect blocks in the heap do not actually contain data for - objects in the heap, their “size” is abstract - - they represent the indexing structure for locating the - direct blocks in the doubling table. - Direct blocks - contain the actual data for objects stored in the heap. -

    - -

    - All indirect blocks have a constant number of block entries in each - row, called the width of the doubling table (stored in - the heap header). - - The number - of rows for each indirect block in the heap is determined by the - size of the block that the indirect block represents in the - doubling table (calculation of this is shown below) and is - constant, except for the “root” - indirect block, which expands and shrinks its number of rows as - needed. -

    - -

    - Blocks in the first two rows of an indirect block - are Starting Block Size number of bytes in size, - and the blocks in each subsequent row are twice the size of - the blocks in the previous row. In other words, blocks in - the third row are twice the Starting Block Size, - blocks in the fourth row are four times the - Starting Block Size, and so on. Entries for - blocks up to the Maximum Direct Block Size point to - direct blocks, and entries for blocks greater than that size - point to further indirect blocks (which have their own - entries for direct and indirect blocks). -

    - -

    - The number of rows of blocks, nrows, in an - indirect block of size iblock_size is given by the - following expression: -

    - nrows = (log2(iblock_size) - - log2(<Starting Block Size> * - <Width>)) + 1 -

    - -

    - The maximum number of rows of direct blocks, max_dblock_rows, - in any indirect block of a fractal heap is given by the - following expression: -

    - max_dblock_rows = - (log2(<Max. Direct Block Size>) - - log2(<Starting Block Size>)) + 2 -

    - -

    - Using the computed values for nrows and - max_dblock_rows, along with the Width of the - doubling table, the number of direct and indirect block entries - (K and N in the indirect block description, below) - in an indirect block can be computed: -

    - K = MIN(nrows, max_dblock_rows) * - Width - -

    - If nrows is less than or equal to max_dblock_rows, - N is 0. Otherwise, N is simply computed: -

    - N = K - (max_dblock_rows * - Width) -

    - -

    - The size indirect blocks on disk is determined by the number - of rows in the indirect block (computed above). The size of direct - blocks on disk is exactly the size of the block in the doubling - table. -

    - -
    - - + +
    +
    +
    - Fractal Heap Header -
    - - - - + + - + + - - + + - - + + - - + + +
    bytebytebytebyteField NameDescription
    Signature

    Filtered Huge Object Address

    +

    The address of the filtered huge object in the file.

    +
    VersionThis space inserted only to align table nicely

    Filtered Huge Object Length

    +

    The length of the filtered huge object in the file.

    +
    Heap ID LengthI/O Filters’ Encoded Length

    Filter Mask

    +

    A 32-bit bit field indicating which filters have been skipped + for this chunk. Each filter has an index number in the pipeline + (starting at 0, with the first filter to apply) and if that filter + is skipped, the bit corresponding to its index is set.

    +
    FlagsThis space inserted only to align table nicely

    Filtered Huge Object Memory Size

    +

    The size of the de-filtered huge object in memory.

    +
    +
    + +
    +
    +
    + + + - + + + + - + - - + - + +
    Version 2 B-tree, Type 5 Record Layout - Link Name + for Indexed Group
    Maximum Size of Managed Objectsbytebytebytebyte

    Next Huge Object IDL

    Hash of Name

    v2 B-tree Address of Huge ObjectsO

    ID (bytes 1-4)

    Amount of Free Space in Managed BlocksL

    ID (bytes 5-7)
    +
    + +
    +
    + - + + - + + - + + +

    Address of Managed Block Free Space ManagerO

    Field NameDescription

    Amount of Managed Space in HeapL

    Hash

    +

    This field is the hash value of the name for the link. The hash + value is the Jenkins’ lookup3 checksum algorithm applied to + the link’s name.

    +

    Amount of Allocated Managed Space in HeapL

    ID

    +

    This is a 7-byte sequence of bytes and is the heap ID for the + link record in the group’s fractal heap.

    +
    +
    + +
    +
    +
    + + + - + + + + - + - - + - - + +
    Version 2 B-tree, Type 6 Record Layout - Creation + Order for Indexed Group

    Offset of Direct Block Allocation Iterator in Managed SpaceL

    bytebytebytebyte

    Number of Managed Objects in HeapL


    Creation Order (8 bytes)
    +

    Size of Huge Objects in HeapL

    ID (bytes 1-4)

    Number of Huge Objects in HeapL

    ID (bytes 5-7)
    +
    +
    +
    + - + + - + + - - + + +

    Size of Tiny Objects in HeapL

    Field NameDescription

    Number of Tiny Objects in HeapL

    Creation Order

    +

    This field is the creation order value for the link.

    +
    Table WidthThis space inserted only to align table nicely

    ID

    +

    This is a 7-byte sequence of bytes and is the heap ID for the + link record in the group’s fractal heap.

    +
    +
    + +
    +
    +
    + + + - + + + + - + + - - - + - - + - - - + +
    Version 2 B-tree, Type 7 Record Layout - Shared + Object Header Messages (Sub-Type 0 - Message in Heap)

    Starting Block SizeL

    bytebytebytebyte

    Maximum Direct Block SizeL

    Message LocationThis space inserted + only to align table nicely
    Maximum Heap SizeStarting # of Rows in Root Indirect BlockHash

    Address of Root BlockO

    Reference Count
    Current # of Rows in Root Indirect BlockThis space inserted only to align table nicely
    Heap ID (8 bytes)
    +
    +
    +
    +
    + - + + - + + - + + - + + -

    Size of Filtered Root Direct Block (optional)L

    Field NameDescription
    I/O Filter Mask (optional)

    Message Location

    +

    This field indicates the location where the message is + stored:

    + + + + + + + + + + + + + +
    ValueDescription
    0Shared message is stored in shared message index heap.
    1Shared message is stored in object header.
    +

    +
    I/O Filter Information (optional, variable size)

    Hash

    +

    This field is the hash value of the shared message. The hash + value is the Jenkins’ lookup3 checksum algorithm applied to + the shared message.

    +
    Checksum

    Reference Count

    +

    The number of objects which reference this message.

    +
    - - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    +

    Heap ID

    + +

    This is an 8-byte sequence of bytes and is the heap ID for + the shared message in the shared message index’s fractal + heap.

    + + -
    + + + +
    +
    +
    + + -
    -
    -
    Version 2 B-tree, Type 7 Record Layout - Shared + Object Header Messages (Sub-Type 1 - Message in Object Header)
    - - + + + + - - + + - - - + - - - + + + - - - + +
    Field NameDescriptionbytebytebytebyte

    Signature

    -

    The ASCII character string “FRHP” - is used to indicate the - beginning of a fractal heap header. This gives file consistency - checking utilities a better chance of reconstructing a - damaged file. -

    -
    Message LocationThis space inserted + only to align table nicely

    Version

    -

    This document describes version 0.

    -
    Hash

    Heap ID Length

    -

    This is the length in bytes of heap object IDs for this heap.

    -
    Reserved (zero)Message TypeObject Header Index

    I/O Filters’ Encoded Length

    -

    This is the size in bytes of the encoded I/O Filter Information. -

    -

    Object Header AddressO
    +
    + - - - - - - - + + +

    Flags

    -

    This field is the heap status flag and is a bit field - indicating additional information about the fractal heap. - - - - - - - - - - - - - - - - - - -
    Bit(s)Description
    0If set, the ID value to use for huge object has wrapped - around. If the value for the Next Huge Object ID - has wrapped around, each new huge object inserted into the - heap will require a search for an ID value. -
    1If set, the direct blocks in the heap are checksummed. -
    2-7Reserved

    - -

    Maximum Size of Managed Objects

    -

    This is the maximum size of managed objects allowed in the heap. - Objects greater than this are ‘huge’ objects and will be - stored in the file directly, rather than in a direct block for - the heap. -

    -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    +
    + +
    +
    + - - + + - - + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + -

    Next Huge Object ID

    -

    This is the next ID value to use for a huge object in the heap. -

    -
    Field NameDescription

    v2 B-tree Address of Huge Objects

    -

    This is the address of the v2 B-tree - used to track huge objects in the heap. The type of records - stored in the v2 B-tree will - be determined by whether the address & length of a huge object - can fit into a heap ID (if yes, it is a “directly” accessed - huge object) and whether there is a filter used on objects - in the heap. -

    -

    Message Location

    +

    This field indicates the location where the message is + stored:

    + + + + + + + + + + + + + +
    ValueDescription
    0Shared message is stored in shared message index heap.
    1Shared message is stored in object header.
    +

    +

    Amount of Free Space in Managed Blocks

    -

    This is the total amount of free space in managed direct blocks - (in bytes). -

    -

    Hash

    +

    This field is the hash value of the shared message. The hash + value is the Jenkins’ lookup3 checksum algorithm applied to + the shared message.

    +

    Address of Managed Block Free Space Manager

    -

    This is the address of the - Free-space Manager for - managed blocks. -

    -

    Message Type

    +

    The object header message type of the shared message.

    +

    Amount of Managed Space in Heap

    -

    This is the total amount of managed space in the heap (in bytes), - essentially the upper bound of the heap’s linear address space. -

    -

    Object Header Index

    +

    + This field indicates that the shared message is the nth + message of its type in the specified object header. +

    +

    Amount of Allocated Managed Space in Heap

    -

    This is the total amount of managed space (in bytes) actually - allocated in - the heap. This can be less than the Amount of Managed Space - in Heap field, if some direct blocks in the heap’s linear - address space are not allocated. -

    -

    Offset of Direct Block Allocation Iterator in Managed Space

    -

    This is the linear heap offset where the next direct - block should be allocated at (in bytes). This may be less than - the Amount of Managed Space in Heap value because the - heap’s address space is increased by a “row” of direct blocks - at a time, rather than by single direct block increments. -

    -

    Number of Managed Objects in Heap

    -

    This is the number of managed objects in the heap. -

    -

    Size of Huge Objects in Heap

    -

    This is the total size of huge objects in the heap (in bytes). -

    -

    Number of Huge Objects in Heap

    -

    This is the number of huge objects in the heap. -

    -

    Size of Tiny Objects in Heap

    -

    This is the total size of tiny objects that are packed in heap - IDs (in bytes). -

    -

    Number of Tiny Objects in Heap

    -

    This is the number of tiny objects that are packed in heap IDs. -

    -

    Table Width

    -

    This is the number of columns in the doubling table for managed - blocks. This value must be a power of two. -

    -

    Starting Block Size

    -

    This is the starting block size to use in the doubling table for - managed blocks (in bytes). This value must be a power of two. -

    -

    Maximum Direct Block Size

    -

    This is the maximum size allowed for a managed direct block. - Objects inserted into the heap that are larger than this value - (less the # of bytes of direct block prefix/suffix) - are stored as ‘huge’ objects. This value must be a power of - two. -

    -

    Maximum Heap Size

    -

    This is the maximum size of the heap’s linear address space for - managed objects (in bytes). The value stored is the log2 of - the actual value, that is: the # of bits of the address space. - ‘Huge’ and ‘tiny’ objects are not counted in this value, since - they do not store objects in the linear address space of the - heap. -

    -

    Starting # of Rows in Root Indirect Block

    -

    This is the starting number of rows for the root indirect block. - A value of 0 indicates that the root indirect block will have - the maximum number of rows needed to address the heap’s Maximum - Heap Size. -

    -

    Address of Root Block

    -

    This is the address of the root block for the heap. It can - be the undefined address if - there is no data in the heap. It either points to a direct - block (if the Current # of Rows in the Root Indirect Block - value is 0), or an indirect block. -

    -

    Current # of Rows in Root Indirect Block

    -

    This is the current number of rows in the root indirect block. - A value of 0 indicates that Address of Root Block - points to a direct block instead of an indirect block. -

    -

    Size of Filtered Root Direct Block

    -

    This is the size of the root direct block, if filters are - applied to heap objects (in bytes). This field is only - stored in the header if the I/O Filters’ Encoded Length - is greater than 0. -

    -

    I/O Filter Mask

    -

    This is the filter mask for the root direct block, if filters - are applied to heap objects. This mask has the same format as - that used for the filter mask in chunked raw data records in a - v1 B-tree. - This field is only - stored in the header if the I/O Filters’ Encoded Length - is greater than 0. -

    -

    I/O Filter Information

    -

    This is the I/O filter information encoding direct blocks and - huge objects, if filters are applied to heap objects. This - field is encoded as a Filter Pipeline - message. - The size of this field is determined by I/O Filters’ - Encoded Length. -

    -

    Checksum

    -

    This is the checksum for the header.

    -

    Object Header Address

    +

    The address of the object header containing the shared + message.

    +
    -
    + + -
    -
    -
    - - +
    +
    +
    +
    - Fractal Heap Direct Block -
    + - - - - + + + + - + - - - + + - - + - - + +
    Version 2 B-tree, Type 8 Record Layout - Attribute + Name for Indexed Attributes
    bytebytebytebytebytebytebytebyte
    Signature
    Heap ID (8 bytes)
    +
    VersionThis space inserted only to align table nicelyMessage FlagsThis space inserted + only to align table nicely

    Heap Header AddressO

    Creation Order
    Block Offset (variable size)Hash of Name
    +
    +
    +
    + - + + - + + -
    Checksum (optional)Field NameDescription

    Object Data (variable size)

    Heap ID

    +

    This is an 8-byte sequence of bytes and is the heap ID for + the attribute in the object’s attribute fractal heap.

    +
    - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    - -
    -
    - - - - + + - - + + - - + + - - - - +
    Field NameDescription

    Message Flags

    The object header message flags for the attribute + message.

    Signature

    -

    The ASCII character string “FHDB” - is used to indicate the - beginning of a fractal heap direct block. This gives file consistency - checking utilities a better chance of reconstructing a - damaged file. -

    -

    Creation Order

    +

    This field is the creation order value for the attribute.

    +

    Version

    -

    This document describes version 0.

    -

    Hash

    +

    This field is the hash value of the name for the attribute. The + hash value is the Jenkins’ lookup3 checksum algorithm applied + to the attribute’s name.

    +

    Heap Header Address

    -

    This is the address for the fractal heap header that this - block belongs to. This field is principally used for file - integrity checking. -

    -
    +
    + +
    +
    +
    + + - - + + + + - - + - - - + + - -
    Version 2 B-tree, Type 9 Record Layout - Creation + Order for Indexed Attributes

    Block Offset

    -

    This is the offset of the block within the fractal heap’s - address space (in bytes). The number of bytes used to encode - this field is the Maximum Heap Size (in the heap’s - header) divided by 8 and rounded up to the next highest integer, - for values that are not a multiple of 8. This value is - principally used for file integrity checking. -

    -
    bytebytebytebyte

    Checksum

    -

    This is the checksum for the direct block.

    -

    This field is only present if bit 1 of Flags in the - heap’s header is set.

    -

    Heap ID (8 bytes)
    +

    Object Data

    -

    This section of the direct block stores the actual data for - objects in the heap. The size of this section is determined by - the direct block’s size minus the size of the other fields - stored in the direct block (for example, the Signature, - Version, and others including the Checksum if it is - present). -

    -
    Message FlagsThis space inserted + only to align table nicely
    -
    - -
    -
    -
    - - - - - - - + +
    - Fractal Heap Indirect Block -
    bytebytebytebyteCreation Order
    +
    +
    +
    + - + + - - + + - + + - + + - - - - - - - - - +
    SignatureField NameDescription
    VersionThis space inserted only to align table nicely

    Heap ID

    +

    This is an 8-byte sequence of bytes and is the heap ID for + the attribute in the object’s attribute fractal heap.

    +

    Heap Header AddressO

    Message Flags

    +

    The object header message flags for the attribute message.

    +
    Block Offset (variable size)

    Creation Order

    +

    This field is the creation order value for the attribute.

    +

    Child Direct Block #0 AddressO


    Size of Filtered Direct Block #0 (optional) L

    Filter Mask for Direct Block #0 (optional)
    +
    - -
    Child Direct Block #1 AddressO

    - - -
    Size of Filtered Direct Block #1 (optional)L

    - - - Filter Mask for Direct Block #1 (optional) - - - ... - +
    +

    + III.B. Disk Format: Level 1B - Group Symbol + Table Nodes +

    + +

    A group is an object internal to the file that allows arbitrary + nesting of objects within the file (including other groups). A group + maps a set of link names in the group to a set of relative file + addresses of objects in the file. Certain metadata for an object to + which the group points can be cached in the group’s symbol table + entry in addition to being in the object’s header.

    + +

    An HDF5 object name space can be stored hierarchically by + partitioning the name into components and storing each component as a + link in a group. The link for a non-ultimate component points to the + group containing the next component. The link for the last component + points to the object being named.

    + +

    + One implementation of a group is a collection of symbol table nodes + indexed by a B-link tree. Each symbol table node contains entries for + one or more links. If an attempt is made to add a link to an already + full symbol table node containing 2K entries, then the node is + split and one node contains K symbols and the other contains K+1 + symbols. +

    - -
    Child Direct Block #K-1 AddressO

    - - -
    Size of Filtered Direct Block #K-1 (optional)L

    - - - Filter Mask for Direct Block #K-1 (optional) - +
    + + - - + + + + + - - - - - - + + - - + + + + - + -
    Symbol Table Node (A Leaf of a B-link tree)

    Child Indirect Block #0 AddressO

    bytebytebytebyte

    Child Indirect Block #1 AddressO

    ...
    Signature

    Child Indirect Block #N-1 AddressO

    Version NumberReserved (zero)Number of Symbols
    Checksum
    +
    Group Entries
    +
    +
    + +
    - - - - +
    +
    +
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - - -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    - - - -
    -
    - - - - + + - - + + - - + + - - + + - - + + +
    Field NameDescriptionField NameDescription

    Signature

    -

    The ASCII character string “FHIB” is used to - indicate the beginning of a fractal heap indirect block. This - gives file consistency checking utilities a better chance of - reconstructing a damaged file. -

    -

    Signature

    +

    + The ASCII character string “ + SNOD + ” is used to indicate the beginning of a symbol table node. + This gives file consistency checking utilities a better chance of + reconstructing a damaged file. +

    +

    Version

    -

    This document describes version 0.

    -

    Version Number

    +

    The version number for the symbol table node. This document + describes version 1. (There is no version ‘0’ of the + symbol table node)

    +

    Heap Header Address

    -

    This is the address for the fractal heap header that this - block belongs to. This field is principally used for file - integrity checking. -

    -

    Number of Entries

    +

    Although all symbol table nodes have the same length, most + contain fewer than the maximum possible number of link entries. + This field indicates how many entries contain valid data. The valid + entries are packed at the beginning of the symbol table node while + the remaining entries contain undefined values.

    +

    Block Offset

    -

    This is the offset of the block within the fractal heap’s - address space (in bytes). The number of bytes used to encode - this field is the Maximum Heap Size (in the heap’s - header) divided by 8 and rounded up to the next highest integer, - for values that are not a multiple of 8. This value is - principally used for file integrity checking. -

    -

    Symbol Table Entries

    +

    + Each link has an entry in the symbol table node. The format of the + entry is described below. There are 2K entries in each + group node, where K is the “Group Leaf Node K” + value from the superblock. +

    +
    +
    + +
    +

    + III.C. Disk Format: Level 1C - Symbol + Table Entry +

    + +

    Each symbol table entry in a symbol table node is designed to + allow for very fast browsing of stored objects. Toward that design + goal, the symbol table entries include space for caching certain + constant metadata from the object header.

    + +
    + + - - + + + + - - - - - - - + + - - + - - + -
    Symbol Table Entry

    Child Direct Block #K Address

    -

    This field is the address of the child direct block. - The size of the [uncompressed] direct block can be computed by - its offset in the heap’s linear address space. -

    -
    bytebytebytebyte

    Size of Filtered Direct Block #K

    -

    This is the size of the child direct block after passing through - the I/O filters defined for this heap (in bytes). If no I/O - filters are present for this heap, this field is not present. -

    -

    Filter Mask for Direct Block #K

    -

    This is the I/O filter mask for the filtered direct block. - This mask has the same format as that used for the filter mask - in chunked raw data records in a v1 B-tree. - If no I/O filters are present for this heap, this field is not - present. -

    -

    Link Name OffsetO
    +

    Child Indirect Block #N Address

    -

    This field is the address of the child indirect block. - The size of the indirect block can be computed by - its offset in the heap’s linear address space. -

    -

    Object Header AddressO
    +

    Checksum

    -

    This is the checksum for the indirect block.

    -
    Cache Type
    - -
    - -
    -

    An object in the fractal heap is identified by means of a fractal heap ID, - which encodes information to locate the object in the heap. - Currently, the fractal heap stores an object in one of three ways, - depending on the object’s size:

    - -
    - - - - - - - - - - - - - - - - - - - + + -
    TypeDescription
    Tiny -

    When an object is small enough to be encoded in the heap ID, the - object’s data is embedded in the fractal heap ID itself. There are - 2 sub-types for this type of object: normal and extended. The - sub-type for tiny heap IDs depends on whether the heap ID is large - enough to store objects greater than 16 bytes or not. If the - heap ID length is 18 bytes or smaller, the ‘normal’ tiny heap ID - form is used. If the heap ID length is greater than 18 bytes in - length, the ‘extended’ form is used. See format description below - for both sub-types. -

    -
    Huge -

    When the size of an object is larger than Maximum Size of - Managed Objects in the Fractal Heap Header, the - object’s data is stored on its own in the file and the object - is tracked/indexed via a version 2 B-tree. All huge objects - for a particular fractal heap use the same v2 B-tree. All huge - objects for a particular fractal heap use the same format for - their huge object IDs. -

    - -

    Depending on whether the IDs for a heap are large enough to hold - the object’s retrieval information and whether I/O pipeline filters - are applied to the heap’s objects, 4 sub-types are derived for - huge object IDs for this heap:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Sub-typeDescription
    Directly accessed, non-filtered -

    The object’s address and length are embedded in the - fractal heap ID itself and the object is directly accessed - from them. This allows the object to be accessed without - resorting to the B-tree. -

    -
    Directly accessed, filtered -

    The filtered object’s address, length, filter mask and - de-filtered size are embedded in the fractal heap ID itself - and the object is accessed directly with them. This allows - the object to be accessed without resorting to the B-tree. -

    -
    Indirectly accessed, non-filtered -

    The object is located by using a B-tree key embedded in - the fractal heap ID to retrieve the address and length from - the version 2 B-tree for huge objects. Then, the address - and length are used to access the object. -

    -
    Indirectly accessed, filtered -

    The object is located by using a B-tree key embedded in - the fractal heap ID to retrieve the filtered object’s - address, length, filter mask and de-filtered size from the - version 2 B-tree for huge objects. Then, this information - is used to access the object. -

    -
    -
    - -
    Managed -

    When the size of an object does not meet the above two - conditions, the object is stored and managed via the direct and - indirect blocks based on the doubling table. -

    -
    Reserved (zero)
    -
    + +
    +
    Scratch-pad Space (16 bytes)
    +
    +
    + + -

    The specific format for each type of heap ID is described below: -

    + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    -
    - - + +
    +
    +
    Fractal Heap ID for Tiny Objects (sub-type 1 - ‘Normal’) -
    - - - - - + + + - - + + - + + -
    bytebytebytebyte
    Field NameDescription
    Version, Type & LengthThis space inserted only to align table nicely

    Link Name Offset

    +

    This is the byte offset into the group’s local heap for + the name of the link. The name is null terminated.

    +

    Data (variable size)

    Object Header Address

    +

    Every object has an object header which serves as a permanent + location for the object’s metadata. In addition to appearing + in the object header, some of the object’s metadata can be + cached in the scratch-pad space.

    +
    -
    - -
    -
    - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version, Type & Length

    -

    This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - -
    BitDescription
    6-7The current version of ID format. This document - describes version 0. -
    4-5The ID type. Tiny objects have a value of 2. -
    0-3The length of the tiny object. The value stored - is one less than the actual length (since zero-length - objects are not allowed to be stored in the heap). - For example, an object of actual length 1 has an - encoded length of 0, an object of actual length 2 - has an encoded length of 1, and so on. -

    - -

    Data

    -

    This is the data for the object. -

    -
    -
    - -
    -
    -
    - - - - - - - - + + + - - - + + - + + +
    Fractal Heap ID for Tiny Objects (sub-type 2 - ‘Extended’) -
    bytebytebytebyte

    Cache Type

    +

    The cache type is determined from the object header. It also + determines the format for the scratch-pad space:

    + + + + + + + + + + + + + + + + + +
    TypeDescription
    0No data is cached by the group entry. This is guaranteed + to be the case when an object header has a link count greater + than one.
    1Group object header metadata is cached in the scratch-pad + space. This implies that the symbol table entry refers to another + group.
    2The entry is a symbolic link. The first four bytes of the + scratch-pad space are the offset into the local heap for the link + value. The object header address will be undefined.
    +

    + +
    Version, Type & LengthExtended LengthThis space inserted only to align table nicely

    Reserved

    +

    These four bytes are present so that the scratch-pad space is + aligned on an eight-byte boundary. They are always set to zero.

    +
    Data (variable size)

    Scratch-pad Space

    +

    This space is used for different purposes, depending on the + value of the Cache Type field. Any metadata about an object + represented in the scratch-pad space is duplicated in the object + header for that object.

    +

    Furthermore, no data is cached in the group entry scratch-pad + space if the object header for the object has a link count greater + than one.

    +
    +
    - - +
    +

    Format of the Scratch-pad Space

    -
    -
    - - - - - - - - - - - - - - - - - - - - +

    The symbol table entry scratch-pad space is formatted according + to the value in the Cache Type field.

    -
    Field NameDescription

    Version, Type & Length

    -

    This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - -
    BitDescription
    6-7The current version of ID format. This document - describes version 0. -
    4-5The ID type. Tiny objects have a value of 2. -
    0-3These 4 bits, together with the next byte, form an - unsigned 12-bit integer for holding the length of the - object. These 4-bits are bits 8-11 of the 12-bit integer. - See description for the Extended Length field below. -

    - -

    Extended Length

    -

    This byte, together with the 4 bits in the previous byte, - forms an unsigned 12-bit integer for holding the length of - the tiny object. These 8 bits are bits 0-7 of the 12-bit - integer formed. The value stored is one less than the actual - length (since zero-length objects are not allowed to be - stored in the heap). For example, an object of actual length - 1 has an encoded length of 0, an object of actual length - 2 has an encoded length of 1, and so on. -

    -

    Data

    -

    This is the data for the object. -

    -
    -
    +

    + If the Cache Type field contains the value zero + (0) + then no information is stored in the scratch-pad space. +

    +

    + If the Cache Type field contains the value one + (1) + , then the scratch-pad space contains cached metadata for another + object header in the following format: +

    -
    -
    -
    - - +
    +
    Fractal Heap ID for Huge Objects (sub-type 1 & 2): indirectly accessed, non-filtered/filtered -
    + - - - - + + + + - - + - + +
    Object Header Scratch-pad Format
    bytebytebytebytebytebytebytebyte
    Version & TypeThis space inserted only to align table nicely
    Address of B-treeO
    +

    v2 B-tree KeyL (variable size)


    Address of Name HeapO
    +
    - - - +
    - - -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    -
    +   + (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.) + + -
    -
    - + + +
    +
    +
    - - + + - - + + - - + +
    Field NameDescriptionField NameDescription

    Version & Type

    -

    This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - -
    BitDescription
    6-7The current version of ID format. This document - describes version 0. -
    4-5The ID type. Huge objects have a value of 1. -
    0-3Reserved. -

    - -

    Address of B-tree

    +

    This is the file address for the root of the group’s + B-tree.

    +

    v2 B-tree Key

    This field is the B-tree key for retrieving the information - from the version 2 B-tree for huge objects needed to access the - object. See the description of v2 B-tree - records sub-type 1 & 2 for a description of the fields. New key - values are derived from Next Huge Object ID in the - Fractal Heap Header.

    +

    Address of Name Heap

    +

    This is the file address for the group’s local heap, in + which are stored the group’s symbol names.

    +
    - - -
    -
    -
    - - +
    +

    + If the Cache Type field contains the value two + (2) + , then the scratch-pad space contains cached metadata for a symbolic + link in the following format: +

    + +
    +
    Fractal Heap ID for Huge Objects (sub-type 3): directly accessed, non-filtered -
    + - - - - + + + + - - + +
    Symbolic Link Scratch-pad Format
    bytebytebytebytebytebytebytebyte
    Version & TypeThis space inserted only to align table nicelyOffset to Link Value
    +
    +
    +
    + - + + - + + +

    Address O

    Field NameDescription

    Length L

    Offset to Link Value

    +

    The value of a symbolic link (that is, the name of the thing + to which it points) is stored in the local heap. This field is the + 4-byte offset into the local heap for the start of the link value, + which is null terminated.

    +
    +
    - +
    +

    + III.D. Disk Format: Level 1D - Local Heaps +

    - - - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    +

    A local heap is a collection of small pieces of data that are + particular to a single object in the HDF5 file. Objects can be inserted + and removed from the heap at any time. The address of a heap does not + change once the heap is created. For example, a group stores addresses + of objects in symbol table nodes with the names of links stored in the + group’s local heap.

    - +
    + + -
    -
    -
    Local Heap
    - - + + + + - - + - - + + - - + -
    Field NameDescriptionbytebytebytebyte

    Version & Type

    -

    This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - -
    BitDescription
    6-7The current version of ID format. This document - describes version 0. -
    4-5The ID type. Huge objects have a value of 1. -
    0-3Reserved. -

    - -
    Signature

    Address

    This field is the address of the object in the file.

    -
    VersionReserved (zero)

    Length

    This field is the length of the object in the file.

    -

    Data Segment SizeL
    +
    -
    - -
    -
    -
    - - - - - - + - - + +
    Fractal Heap ID for Huge Objects (sub-type 4): directly accessed, filtered -
    bytebytebytebyte
    Offset to Head of Free-listL
    +
    Version & TypeThis space inserted only to align table nicely
    Address of Data SegmentO
    +
    + - + + - - + + +

    Address O

     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)

    Length L

     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    + +
    +
    +
    + - + + - + + -
    Filter MaskField NameDescription

    De-filtered Size L

    Signature

    +

    + The ASCII character string “ + HEAP + ” is used to indicate the beginning of a heap. This gives + file consistency checking utilities a better chance of + reconstructing a damaged file. +

    +
    - - - - + + - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -

    Version

    +

    Each local heap has its own version number so that new heaps + can be added to old files. This document describes version zero (0) + of the local heap.

    +
     (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    -
    - -
    -
    - - - + + - - + + - - - - - - - - - - - - - - - - - - + + + +
    Field NameDescription

    Data Segment Size

    +

    The total amount of disk memory allocated for the heap data. + This may be larger than the amount of space required by the objects + stored in the heap. The extra unused space in the heap holds a + linked list of free blocks.

    +

    Version & Type

    -

    This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - -
    BitDescription
    6-7The current version of ID format. This document - describes version 0. -
    4-5The ID type. Huge objects have a value of 1. -
    0-3Reserved. -

    - -

    Offset to Head of Free-list

    +

    + This is the offset within the heap data segment of the first free + block (or the undefined address if + there is no free block). The free block contains “Size of + Lengths” bytes that are the offset of the next free block (or + the value ‘1’ if this is the last free block) followed + by “Size of Lengths” bytes that store the size of this + free block. The size of the free block includes the space used to + store the offset of the next free block and the size of the current + block, making the minimum size of a free block 2 * “Size of + Lengths”. +

    +

    Address

    This field is the address of the filtered object in the file.

    -

    Length

    This field is the length of the filtered object in the file.

    -

    Filter Mask

    This field is the I/O pipeline filter mask for the - filtered object in the file.

    -

    De-filtered Size

    This field is the size of the de-filtered object in the file.

    -

    Address of Data Segment

    +

    The data segment originally starts immediately after the heap + header, but if the data segment must grow as a result of adding + more objects, then the data segment may be relocated, in its + entirety, to another part of the file.

    +
    +
    - - +

    Objects within a local heap should be aligned on an 8-byte + boundary.

    -
    -
    -
    - - +
    +

    + III.E. Disk Format: Level 1E - Global Heap +

    + +

    Each HDF5 file has a global heap which stores various types of + information which is typically shared between datasets. The global heap + was designed to satisfy these goals:

    + +
      +
    1. Repeated access to a heap object must be efficient without + resulting in repeated file I/O requests. Since global heap objects + will typically be shared among several datasets, it is probable that + the object will be accessed repeatedly.
    2. +
    3. Collections of related global heap objects should result in + fewer and larger I/O requests. For instance, a dataset of object + references will have a global heap object for each reference. Reading + the entire set of object references should result in a few large I/O + requests instead of one small I/O request for each reference.
    4. +
    5. It should be possible to remove objects from the global heap + and the resulting file hole should be eligible to be reclaimed for + other uses.
    6. +
    + + +

    + The implementation of the heap makes use of the memory management + already available at the file level and combines that with a new object + called a collection to achieve goal B. The global heap is the + set of all collections. Each global heap object belongs to exactly one + collection and each collection contains one or more global heap + objects. For the purposes of disk I/O and caching, a collection is + treated as an atomic object, addressing goal A. +

    - - - - - - +

    When a global heap object is deleted from a collection (which + occurs when its reference count falls to zero), objects located after + the deleted object in the collection are packed down toward the + beginning of the collection and the collection’s global heap + object 0 is created (if possible) or its size is increased to account + for the recently freed space. There are no gaps between objects in each + collection, with the possible exception of the final space in the + collection, if it is not large enough to hold the header for the + collection’s global heap object 0. These features address goal C. +

    + +

    The HDF5 Library creates global heap collections as needed, so + there may be multiple collections throughout the file. The set of all + of them is abstractly called the “global heap”, although + they do not actually link to each other, and there is no global place + in the file where you can discover all of the collections. The + collections are found simply by finding a reference to one through + another object in the file. For example, data of variable-length + datatype elements is stored in the global heap and is accessed via a + global heap ID. The format for global heap IDs is described at the end + of this section.

    + +
    +
    Fractal Heap ID for Managed Objects -
    bytebytebytebyte
    + - - + + + + + - + - + + -
    A Global Heap Collection
    Version & TypeThis space inserted only to align table nicelybytebytebytebyte
    Offset (variable size)Signature
    Length (variable size)VersionReserved (zero)
    -
    -
    -
    - - - - - - - - - - - - - - - - - - - + + -
    Field NameDescription

    Version & Type

    This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - -
    BitDescription
    6-7The current version of ID format. This document - describes version 0. -
    4-5The ID type. Managed objects have a value of 0. -
    0-3Reserved. -

    -

    Offset

    This field is the offset of the object in the heap. - This field’s size is the minimum number of bytes - necessary to encode the Maximum Heap Size value - (from the Fractal Heap Header). For example, if the - value of the Maximum Heap Size is less than 256 bytes, - this field is 1 byte in length, a Maximum Heap Size - of 256-65535 bytes uses a 2 byte length, and so on.

    Length

    This field is the length of the object in the heap. It - is determined by taking the minimum value of Maximum - Direct Block Size and Maximum Size of Managed - Objects in the Fractal Heap Header. Again, - the minimum number of bytes needed to encode that value is - used for the size of this field.


    Collection SizeL
    +
    -
    - -
    -

    -III.G. Disk Format: Level 1G - Free-space Manager

    - -

    - Free-space managers are used to describe space within a heap or - the entire HDF5 file that is not currently used for that heap or - file. -

    - -

    - The free-space manager header contains metadata information - about the space being tracked, along with the address of the list - of free space sections which actually describes the free - space. The header records information about free-space sections being - tracked, creation parameters for handling free-space sections of a - client, and section information used to locate the collection of - free-space sections. -

    - -

    - The free-space section list stores a collection of - free-space sections that is specific to each client of the - free-space manager. - - For example, the fractal heap is a client of the free space manager - and uses it to track unused space within the heap. There are 4 - types of section records for the fractal heap, each of which has - its own format, listed below. -

    - -
    - - - - - - + - + - - - + - + - + +
    - Free-space Manager Header -
    bytebytebytebyte
    Global Heap Object 1
    +
    Signature
    Global Heap Object 2
    +
    VersionClient IDThis space inserted only to align table nicely
    ...
    +

    Total Space TrackedL


    Global Heap Object N
    +

    Total Number of SectionsL


    Global Heap Object 0 (free space)
    +
    + - - - - - - + + + +

    Number of Serialized SectionsL


    Number of Un-Serialized SectionsL

     (Items marked with an ‘L’ in the + above table are of the size specified in “Size of + Lengths” field in the superblock.)
    - - Number of Section Classes - This space inserted only to align table nicely - +
    +
    +
    + - - + + - - - + + + - - + + + - + + - + + - - + + + +
    Shrink PercentExpand PercentField NameDescription
    Size of Address SpaceThis space inserted only to align table nicely

    Signature

    +

    + The ASCII character string “ + GCOL + ” is used to indicate the beginning of a collection. This + gives file consistency checking utilities a better chance of + reconstructing a damaged file. +

    +

    Maximum Section Size L

    Version

    +

    Each collection has its own version number so that new + collections can be added to old files. This document describes + version one (1) of the collections (there is no version zero (0)). +

    +

    Address of Serialized Section ListO

    Collection Size

    +

    This is the size in bytes of the entire collection including + this field. The default (and minimum) collection size is 4096 bytes + which is a typical file system block size. This allows for 127 + 16-byte heap objects plus their overhead (the collection header of + 16 bytes and the 16 bytes of information about each heap object).

    +

    Size of Serialized Section List UsedL

    + Global Heap Object 1 through N +

    +

    The objects are stored in any order with no intervening + unused space.

    +

    Allocated Size of Serialized Section ListL

    Global Heap Object 0

    +

    Global Heap Object 0 (zero), when present, represents the + free space in the collection. Free space always appears at the end + of the collection. If the free space is too small to store the + header for Object 0 (described below) then the header is implied + and the collection contains no free space.

    +
    +
    - - Checksum - - +
    +
    +
    + + -
    Global Heap Object
    - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    - -
    - -
    -
    - - - - + + + + - - + + - - + - - + - - - - - - - - - - - - - - - - + +
    Field NameDescriptionbytebytebytebyte

    Signature

    -

    The ASCII character string “FSHD” is used to - indicate the beginning of the Free-space Manager Header. - This gives file consistency checking utilities a better chance of - reconstructing a damaged file. -

    -
    Heap Object IndexReference Count

    Version

    -

    This is the version number for the Free-space Manager Header - and this document describes version 0.

    -
    Reserved (zero)

    Client ID

    -

    This is the client ID for identifying the user of this - free-space manager: - - - - - - - - - - - - - - - - - - - -
    IDDescription
    0Fractal heap -
    1File -
    2+Reserved. -

    - -

    Object SizeL
    +

    Total Space Tracked

    -

    This is the total amount of free space being tracked, in bytes. -

    -

    Total Number of Sections

    -

    This is the total number of free-space sections being tracked. -

    -

    Number of Serialized Sections

    -

    This is the number of serialized free-space sections being - tracked. -

    -

    Number of Un-Serialized Sections

    -

    This is the number of un-serialized free-space sections being - managed. Un-serialized sections are created by the free-space - client when the list of sections is read in. -

    -

    Object Data
    +
    + - - + + +

    Number of Section Classes

    -

    This is the number of section classes handled by this free space - manager for the free-space client. -

    -
     (Items marked with an ‘L’ in the + above table are of the size specified in “Size of + Lengths” field in the superblock.)
    - -

    Shrink Percent

    - -

    This is the percent of current size to shrink the allocated - serialized free-space section list. -

    - - +
    +
    +
    + - - + + - - + + - - + + - - + + - - + + - - + + +

    Expand Percent

    -

    This is the percent of current size to expand the allocated - serialized free-space section list. -

    -
    Field NameDescription

    Size of Address Space

    -

    This is the size of the address space that free-space sections - are within. This is stored as the log2 of the - actual value (in other words, the number of bits required - to store values within that address space). -

    -

    Heap Object Index

    +

    + Each object has a unique identification number within a collection. + The identification numbers are chosen so that new objects have the + smallest value possible with the exception that the identifier + 0 + always refers to the object which represents all free space within + the collection. +

    +

    Maximum Section Size

    -

    This is the maximum size of a section to be tracked. -

    -

    Reference Count

    +

    All heap objects have a reference count field. An object + which is referenced from some other part of the file will have a + positive reference count. The reference count for Object 0 is + always zero.

    +

    Address of Serialized Section List

    -

    This is the address where the serialized free-space section - list is stored. -

    -

    Reserved

    +

    Zero padding to align next field on an 8-byte boundary.

    +

    Size of Serialized Section List Used

    -

    This is the size of the serialized free-space section - list used (in bytes). This value must be less than - or equal to the allocated size of serialized section - list, below. -

    -

    Object Size

    +

    This is the size of the object data stored for the object. + The actual storage space allocated for the object data is rounded + up to a multiple of eight.

    +

    Allocated Size of Serialized Section List

    -

    This is the size of serialized free-space section list - actually allocated (in bytes). -

    -

    Object Data

    +

    The object data is treated as a one-dimensional array of + bytes to be interpreted by the caller.

    +
    - -

    Checksum

    - -

    This is the checksum for the free-space manager header.

    - - +
    - - +
    +

    The format for the ID used to locate an object in the global heap + is described here:

    -
    -

    The free-space sections being managed are stored in a - free-space section list, described below. The sections - in the free-space section list are stored in the following way: - a count of the number of sections describing a particular size of - free space and the size of the free-space described (in bytes), - followed by a list of section description records; then another - section count and size, followed by the list of section - descriptions for that size; and so on.

    - - -
    - - +
    +
    - Free-space Section List -
    + - - - - + + + + - + - - + +
    Global Heap ID
    bytebytebytebytebytebytebytebyte
    Signature
    Collection AddressO
    +
    VersionThis space inserted only to align table nicelyObject Index
    + - + + +

    Free-space Manager Header AddressO

     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    +
    + +
    +
    + - + + - - - - - - - - - - - + + + - - - + + + + - - - +
    Number of Section Records in Set #0 (variable size)Field NameDescription
    Size of Free-space Section Described in Record Set #0 (variable size)
    Record Set #0 Section Record #0 Offset(variable size)
    Record Set #0 Section Record #0 TypeThis space inserted only to align table nicely

    Collection Address

    +

    This field is the address of the global heap collection where + the data object is stored.

    +
    Record Set #0 Section Record #0 Data (variable size)

    ID

    +

    This field is the index of the data object within the global + heap collection.

    +
    ...
    +
    - - Record Set #0 Section Record #K-1 Offset(variable size) - - - Record Set #0 Section Record #K-1 Type - This space inserted only to align table nicely - +
    +

    + III.F. Disk Format: Level 1F - Fractal Heap +

    + +

    + Each fractal heap consists of a header and zero or more direct and + indirect blocks (described below). The header contains general + information as well as initialization parameters for the doubling + table. The Root Block Address in the header points to the + first direct or indirect block in the heap. +

    - - Record Set #0 Section Record #K-1 Data (variable size) - +

    + Fractal heaps are based on a data structure called a doubling + table. A doubling table provides a mechanism for quickly extending an + array-like data structure that minimizes the number of empty blocks in + the heap, while retaining very fast lookup of any element within the + array. More information on fractal heaps and doubling tables can be + found in the RFC “Private Heaps in + HDF5.” +

    - - Number of Section Records in Set #1 (variable size) - +

    The fractal heap implements the doubling table structure with + indirect and direct blocks. Indirect blocks in the heap do not actually + contain data for objects in the heap, their “size” is + abstract - they represent the indexing structure for locating the + direct blocks in the doubling table. Direct blocks contain the actual + data for objects stored in the heap.

    + +

    + All indirect blocks have a constant number of block entries in each + row, called the width of the doubling table (stored in the + heap header). The number of rows for each indirect block in the heap is + determined by the size of the block that the indirect block represents + in the doubling table (calculation of this is shown below) and is + constant, except for the “root” indirect block, which + expands and shrinks its number of rows as needed. +

    - - Size of Free-space Section Described in Record Set #1 (variable size) - +

    + Blocks in the first two rows of an indirect block are Starting + Block Size number of bytes in size, and the blocks in each subsequent + row are twice the size of the blocks in the previous row. In other + words, blocks in the third row are twice the Starting Block + Size, blocks in the fourth row are four times the Starting + Block Size, and so on. Entries for blocks up to the Maximum + Direct Block Size point to direct blocks, and entries for blocks + greater than that size point to further indirect blocks (which have + their own entries for direct and indirect blocks). +

    - - Record Set #1 Section Record #0 Offset(variable size) - +

    + The number of rows of blocks, nrows, in an indirect block of + size iblock_size is given by the following expression:
    +
    nrows = (log2(iblock_size) - log2(<Starting + Block Size> * <Width>)) + 1 +

    - - Record Set #1 Section Record #0 Type - This space inserted only to align table nicely - +

    + The maximum number of rows of direct blocks, max_dblock_rows, + in any indirect block of a fractal heap is given by the following + expression:

    max_dblock_rows = (log2(<Max. + Direct Block Size>) - log2(<Starting Block + Size>)) + 2 +

    - - Record Set #1 Section Record #0 Data (variable size) - +

    + Using the computed values for nrows and max_dblock_rows, + along with the Width of the doubling table, the number of + direct and indirect block entries (K and N in the + indirect block description, below) in an indirect block can be + computed:

    K = MIN(nrows, max_dblock_rows) + * Width

    If nrows is less than or + equal to max_dblock_rows, N is 0. Otherwise, N + is simply computed:

    N = K - (max_dblock_rows + * Width) +

    - - ... - +

    The size of indirect blocks on disk is determined by the number of + rows in the indirect block (computed above). The size of direct blocks + on disk is exactly the size of the block in the doubling table.

    - - Record Set #1 Section Record #K-1 Offset(variable size) - +
    + + - - - - + + + + + + - - - + + + - - - + + + + - - - + + + + - + + - - + + - - - + + + - - - - + + + - - - + + + - - - + + + - - - + + + - - - - + + + - - - + + + - - - -
    Fractal Heap Header
    Record Set #1 Section Record #K-1 TypeThis space inserted only to align table nicely
    bytebytebytebyte
    Record Set #1 Section Record #K-1 Data (variable size)
    Signature
    ...
    VersionThis space inserted + only to align table nicely
    ...
    Heap ID LengthI/O Filters’ Encoded Length
    Number of Section Records in Set #N-1 (variable size)FlagsThis space inserted + only to align table nicely
    Size of Free-space Section Described in Record Set #N-1 (variable size)
    Maximum Size of Managed Objects
    Record Set #N-1 Section Record #0 Offset(variable size)

    Next Huge Object IDL
    +
    Record Set #N-1 Section Record #0 TypeThis space inserted only to align table nicely

    v2 B-tree Address of Huge ObjectsO
    +
    Record Set #N-1 Section Record #0 Data (variable size)

    Amount of Free Space in Managed BlocksL
    +
    ...

    Address of Managed Block Free Space + ManagerO
    +
    Record Set #N-1 Section Record #K-1 Offset(variable size)

    Amount of Managed Space in HeapL
    +
    Record Set #N-1 Section Record #K-1 TypeThis space inserted only to align table nicely

    Amount of Allocated Managed Space in HeapL
    +
    Record Set #N-1 Section Record #K-1 Data (variable size)

    Offset of Direct Block Allocation + Iterator in Managed SpaceL
    +
    Checksum
    + +
    Number of Managed Objects in HeapL
    +
    + - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    -
    +
    Size of Huge Objects in HeapL
    +
    + -
    -
    - - - - + + - - + - - + - - + + - - - + + -

    - The number of sets of free-space section records is - determined by the size of serialized section list in - the free-space manager header. -

    - - + + + - - + + -

    - The length of this field is the minimum number of bytes needed - to store the maximum section size (from the - free-space manager header). -

    - + + - - + + -

    - The length of this field is the minimum number of bytes needed - to store the size of address space (from the - free-space manager header). -

    - + + - - + -
    Field NameDescription

    Number of Huge Objects in HeapL
    +

    Signature

    -

    The ASCII character string “FSSE” is used to - indicate the beginning of the Free-space Section Information. - This gives file consistency checking utilities a better chance of - reconstructing a damaged file. -

    -

    Size of Tiny Objects in HeapL
    +

    Version

    -

    This is the version number for the Free-space Section List - and this document describes version 0.

    -

    Number of Tiny Objects in HeapL
    +

    Free-space Manager Header Address

    -

    This is the address of the Free-space Manager Header. - This field is principally used for file - integrity checking. -

    -
    Table WidthThis space inserted + only to align table nicely

    Number of Section Records for Set #N

    -

    This is the number of free-space section records for set #N. - The length of this field is the minimum number of bytes needed - to store the number of serialized sections (from the - free-space manager header). -

    +

    Starting Block SizeL
    +

    Maximum Direct Block SizeL
    +

    Section Size for Record Set #N

    -

    This is the size (in bytes) of the free-space section described - for all the section records in set #N. -

    +
    Maximum Heap SizeStarting # of Rows in Root Indirect Block

    Address of Root BlockO
    +

    Record Set #N Section #K Offset

    -

    This is the offset (in bytes) of the free-space section within - the client for the free-space manager. -

    +
    Current # of Rows in Root Indirect BlockThis space inserted + only to align table nicely

    Size of Filtered Root Direct Block (optional)L
    +

    Record Set #N Section #K Type

    -

    This is the type of the section record, used to decode the - record set #N section #K data information. The defined - record type for file client is: +

    I/O Filter Mask (optional)
    - - - - + + + - - - - - - - - -
    TypeDescription
    I/O Filter Information (optional, + variable size)
    0File’s section (a range of actual bytes in file) -
    1+Reserved. -

    + + Checksum + -

    The defined record types for a fractal heap client are: + - - - - - +
    TypeDescription
    + + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    - - 0 - Fractal heap “single” section - - +

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Signature

    +

    + The ASCII character string “ + FRHP + ” is used to indicate the beginning of a fractal heap header. + This gives file consistency checking utilities a better chance of + reconstructing a damaged file. +

    +

    Version

    +

    This document describes version 0.

    +

    Heap ID Length

    +

    This is the length in bytes of heap object IDs for this heap.

    +

    I/O Filters’ Encoded Length

    +

    + This is the size in bytes of the encoded I/O Filter + Information. +

    +

    Flags

    +

    This field is the heap status flag and is a bit field + indicating additional information about the fractal heap.

    + - - + + - - + + - - - + + - - - + + -
    1Fractal heap “first row” section - Bit(s)Description
    2Fractal heap “normal row” section - 0If set, the ID value to use for huge object has wrapped + around. If the value for the Next Huge Object ID has + wrapped around, each new huge object inserted into the heap will + require a search for an ID value. +
    3Fractal heap “indirect” section - 1If set, the direct blocks in the heap are checksummed.
    4+Reserved. - 2-7Reserved

    +
    +

    - + -

    Record Set #N Section #K Data

    - -

    This is the section-type specific information for each record - in the record set, described below. -

    - +

    Maximum Size of Managed Objects

    + +

    This is the maximum size of managed objects allowed in the + heap. Objects greater than this are ‘huge’ objects + and will be stored in the file directly, rather than in a direct + block for the heap.

    + -

    Checksum

    - -

    This is the checksum for the Free-space Section List. -

    - +

    Next Huge Object ID

    + +

    This is the next ID value to use for a huge object in the + heap.

    + - -
    - -
    -

    - The section-type specific data for each free-space section record is - described below: -

    - -
    - - - - + + -
    - File’s Section Data Record -
    No additional record data stored

    v2 B-tree Address of Huge Objects

    +

    + This is the address of the v2 B-tree used + to track huge objects in the heap. The type of records stored in + the v2 B-tree will be determined by whether the address & + length of a huge object can fit into a heap ID (if yes, it is a + “directly” accessed huge object) and whether there is a + filter used on objects in the heap. +

    +
    -
    - -
    -
    -
    - - - + + -
    - Fractal Heap “Single” Section Data Record -
    No additional record data stored

    Amount of Free Space in Managed Blocks

    +

    This is the total amount of free space in managed direct + blocks (in bytes).

    +
    -
    - -
    -
    -
    - - - + + -
    - Fractal Heap “First Row” Section Data Record -
    Same format as “indirect” section data

    Address of Managed Block Free Space Manager

    +

    + This is the address of the Free-space + Manager for managed blocks. +

    +
    -
    - -
    -
    -
    - - - + + -
    - Fractal Heap “Normal Row” Section Data Record -
    No additional record data stored

    Amount of Managed Space in Heap

    +

    This is the total amount of managed space in the heap (in + bytes), essentially the upper bound of the heap’s linear + address space.

    +
    -
    - -
    -
    -
    - - - - - - + + - + + - - + + - - + + -
    - Fractal Heap “Indirect” Section Data Record -
    bytebytebytebyte

    Amount of Allocated Managed Space in Heap

    +

    + This is the total amount of managed space (in bytes) actually + allocated in the heap. This can be less than the Amount of + Managed Space in Heap field, if some direct blocks in the + heap’s linear address space are not allocated. +

    +
    Fractal Heap Indirect Block Offset (variable size)

    Offset of Direct Block Allocation Iterator in Managed + Space

    +

    + This is the linear heap offset where the next direct block should + be allocated at (in bytes). This may be less than the Amount + of Managed Space in Heap value because the heap’s address + space is increased by a “row” of direct blocks at a + time, rather than by single direct block increments. +

    +
    Block Start RowBlock Start Column

    Number of Managed Objects in Heap

    +

    This is the number of managed objects in the heap.

    +
    Number of BlocksThis space inserted only to align table nicely

    Size of Huge Objects in Heap

    +

    This is the total size of huge objects in the heap (in + bytes).

    +
    -
    -
    -
    - - - - + + + - - + + - - + + - - + + - - + + -
    Field NameDescription

    Number of Huge Objects in Heap

    +

    This is the number of huge objects in the heap.

    +

    Fractal Heap Block Offset

    -

    The offset of the indirect block in the fractal heap’s address - space containing the empty blocks. -

    -

    - The number of bytes used to encode this field is the minimum - number of bytes needed to encode values for the Maximum - Heap Size (in the fractal heap’s header). -

    -

    Size of Tiny Objects in Heap

    +

    This is the total size of tiny objects that are packed in + heap IDs (in bytes).

    +

    Block Start Row

    -

    This is the row that the empty blocks start in. -

    -

    Number of Tiny Objects in Heap

    +

    This is the number of tiny objects that are packed in heap + IDs.

    +

    Block Start Column

    -

    This is the column that the empty blocks start in. -

    -

    Table Width

    +

    This is the number of columns in the doubling table for + managed blocks. This value must be a power of two.

    +

    Number of Blocks

    -

    This is the number of empty blocks covered by the section. -

    -

    Starting Block Size

    +

    This is the starting block size to use in the doubling table + for managed blocks (in bytes). This value must be a power of two.

    +
    -
    - -
    -

    -III.H. Disk Format: Level 1H - Shared Object Header Message Table

    - -

    - The shared object header message table is used to locate - object - header messages that are shared between two or more object headers - in the file. Shared object header messages are stored and indexed - in the file in one of two ways: indexed sequentially in a - shared header message list or indexed with a v2 B-tree. - The shared messages themselves are either stored in a fractal - heap (when two or more objects share the message), or remain in an - object’s header (when only one object uses the message currently, - but the message can be shared in the future). -

    - -

    - The shared object header message table - contains a list of shared message index headers. Each index header - records information about the version of the index format, the index - storage type, flags for the message types indexed, the number of - messages in the index, the address where the index resides, - and the fractal heap address if shared messages are stored there. -

    - -

    - Each index can be either a list or a v2 B-tree and may transition - between those two forms as the number of messages in the index - varies. Each shared message record contains information used to - locate the shared message from either a fractal heap or an object - header. The types of messages that can be shared are: Dataspace, - Datatype, Fill Value, Filter Pipeline and Attribute. -

    - -

    - The shared object header message table is pointed to - from a shared message table message - in the superblock extension for a file. This message stores the - version of the table format, along with the number of index headers - in the table. -

    - -
    - - - - - - + + - + + - - - + + - + + - - + + - - - - - - - - - - - + + + - - + + + - - + + + - - - + + +
    - Shared Object Header Message Table -
    bytebytebytebyte

    Maximum Direct Block Size

    +

    This is the maximum size allowed for a managed direct block. + Objects inserted into the heap that are larger than this value + (less the # of bytes of direct block prefix/suffix) are stored as + ‘huge’ objects. This value must be a power of two.

    +
    Signature

    Maximum Heap Size

    +

    This is the maximum size of the heap’s linear address + space for managed objects (in bytes). The value stored is the log2 + of the actual value, that is: the # of bits of the address space. + ‘Huge’ and ‘tiny’ objects are not counted + in this value, since they do not store objects in the linear + address space of the heap.

    +
    Version for index #0Index Type for index #0Message Type Flags for index #0

    Starting # of Rows in Root Indirect Block

    +

    + This is the starting number of rows for the root indirect block. A + value of 0 indicates that the root indirect block will have the + maximum number of rows needed to address the heap’s Maximum + Heap Size. +

    +
    Minimum Message Size for index #0

    Address of Root Block

    +

    + This is the address of the root block for the heap. It can be the undefined address if there is no data + in the heap. It either points to a direct block (if the Current + # of Rows in the Root Indirect Block value is 0), or an indirect + block. +

    +
    List Cutoff for index #0v2 B-tree Cutoff for index #0

    Current # of Rows in Root Indirect Block

    +

    + This is the current number of rows in the root indirect block. A + value of 0 indicates that Address of Root Block points to + direct block instead of indirect block. +

    +
    Number of Messages for index #0This space inserted only to align table nicely

    Index AddressO for index #0


    Fractal Heap AddressO for index #0

    Size of Filtered Root Direct Block

    +

    + This is the size of the root direct block, if filters are applied + to heap objects (in bytes). This field is only stored in the header + if the I/O Filters’ Encoded Length is greater than + 0. +

    +
    ...

    I/O Filter Mask

    +

    + This is the filter mask for the root direct block, if filters are + applied to heap objects. This mask has the same format as that used + for the filter mask in chunked raw data records in a v1 B-tree. This field is only stored in the + header if the I/O Filters’ Encoded Length is greater + than 0. +

    +
    ...

    I/O Filter Information

    +

    + This is the I/O filter information encoding direct blocks and huge + objects, if filters are applied to heap objects. This field is + encoded as a Filter Pipeline message. + The size of this field is determined by I/O Filters’ + Encoded Length. +

    +
    Version for index #N-1Index Type for index #N-1Message Type Flags for index #N-1

    Checksum

    +

    This is the checksum for the header.

    +
    +
    + +
    +
    +
    + + + - + + + + - - + - - - - - - - - - - - - - - - -
    Fractal Heap Direct Block
    Minimum Message Size for index #N-1bytebytebytebyte
    List Cutoff for index #N-1v2 B-tree Cutoff for index #N-1Signature
    Number of Messages for index #N-1This space inserted only to align table nicely

    Index AddressO for index #N-1


    Fractal Heap AddressO for index #N-1

    Checksum
    + Version + This space inserted + only to align table nicely + - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    - -
    -
    - - - - + - - + - - + - - + - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription
    Heap Header AddressO
    +

    Signature

    -

    The ASCII character string “SMTB” is used to - indicate the beginning of the Shared Object Header Message table. - This gives file consistency checking utilities a better chance of - reconstructing a damaged file. -

    -
    Block Offset (variable size)

    Version for index #N

    -

    This is the version number for the list of shared object header message - indexes and this document describes version 0.

    -
    Checksum (optional)

    Index Type for index #N

    -

    The type of index can be an unsorted list or a v2 B-tree. -

    -

    Object Data (variable size)
    +

    Message Type Flags for index #N

    -

    This field indicates the type of messages tracked in the index, - as follows: - - - - - +
    BitsDescription
    -

    0If set, the index tracks Dataspace Messages. -
    1If set, the message tracks Datatype Messages. -
    2If set, the message tracks Fill Value Messages. -
    3If set, the message tracks Filter Pipeline Messages. -
    4If set, the message tracks Attribute Messages. -
    5-15Reserved (zero). -

    + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    +
    -

    - An index can track more than one type of message, but each type - of message can only by in one index. -

    - - +
    +
    + + + + + - - + + - - - - - - - + + + - - + + - - + + - - + + - - + + -
    Field NameDescription

    Minimum Message Size for index #N

    -

    This is the message size sharing threshold for the index. - If the encoded size of the message is less than this value, the - message is not shared. -

    -

    Signature

    +

    + The ASCII character string “ + FHDB + ” is used to indicate the beginning of a fractal heap direct + block. This gives file consistency checking utilities a better + chance of reconstructing a damaged file. +

    +

    List Cutoff for index #N

    -

    This is the cutoff value for the indexing of messages to - switch from a list to a v2 B-tree. If the number of messages - is greater than this value, the index should be a v2 B-tree. -

    -

    v2 B-tree Cutoff for index #N

    -

    This is the cutoff value for the indexing of messages to - switch from a v2 B-tree back to a list. If the number of - messages is less than this value, the index should be a list. -

    -

    Version

    +

    This document describes version 0.

    +

    Number of Messages for index #N

    -

    The number of shared messages being tracked for the index. -

    -

    Heap Header Address

    +

    This is the address for the fractal heap header that this + block belongs to. This field is principally used for file integrity + checking.

    +

    Index Address for index #N

    -

    This field is the address of the list or v2 B-tree where the - index nodes reside. -

    -

    Block Offset

    +

    + This is the offset of the block within the fractal heap’s + address space (in bytes). The number of bytes used to encode this + field is the Maximum Heap Size (in the heap’s + header) divided by 8 and rounded up to the next highest integer, + for values that are not a multiple of 8. This value is principally + used for file integrity checking. +

    +

    Fractal Heap Address for index #N

    -

    This field is the address of the fractal heap if shared messages - are stored there. -

    -

    Checksum

    +

    This is the checksum for the direct block.

    +

    + This field is only present if bit 1 of Flags in the + heap’s header is set. +

    +

    Checksum

    -

    This is the checksum for the table.

    -

    Object Data

    +

    + This section of the direct block stores the actual data for objects + in the heap. The size of this section is determined by the direct + block’s size minus the size of the other fields stored in the + direct block (for example, the Signature, Version, + and others including the Checksum if it is present). +

    +
    -
    + + -
    -

    - Shared messages are indexed either with a shared message record - list, described below, or using a v2 B-tree (using record type 7). - The number of records in the shared message record list is - determined in the index’s entry in the shared object header message - table. -

    - -
    - - +
    +
    +
    +
    - Shared Message Record List -
    + - - - - + + + + - + - + + - + - + - + - - + -
    Fractal Heap Indirect Block
    bytebytebytebytebytebytebytebyte
    SignatureSignature
    Shared Message Record #0VersionThis space inserted + only to align table nicely
    Shared Message Record #1
    Heap Header AddressO
    +
    ...Block Offset (variable size)
    Shared Message Record #N-1
    Child Direct Block #0 AddressO
    +
    Checksum
    Size of Filtered Direct Block #0 (optional) + L
    +
    -
    - -
    -
    - - - - + + - - + - - - + - - - + -
    Field NameDescription
    Filter Mask for Direct Block #0 (optional)

    Signature

    -

    The ASCII character string “SMLI” is used to - indicate the beginning of a list of index nodes. - This gives file consistency checking utilities a better chance of - reconstructing a damaged file. -

    -

    Child Direct Block #1 AddressO
    +

    Shared Message Record #N

    -

    The record for locating the shared message, either in the - fractal heap for the index, or an object header (see format for - index nodes below). -

    -

    Size of Filtered Direct Block #1 (optional)L
    +

    Checksum

    -

    This is the checksum for the list. -

    -
    Filter Mask for Direct Block #1 (optional)
    -
    - -
    -

    - The record for each shared message in an index is stored in one of the - following forms: -

    - -
    - - - - - - - + - - + - - + - - + - + -
    - Shared Message Record, for messages stored in a fractal heap -
    bytebytebytebyte...
    Message LocationThis space inserted only to align table nicely
    Child Direct Block #K-1 AddressO
    +
    Hash Value
    Size of Filtered Direct Block #K-1 (optional)L
    +
    Reference CountFilter Mask for Direct Block #K-1 (optional)

    Fractal Heap ID


    Child Indirect Block #0 AddressO
    +
    -
    -
    -
    - - - - + + - - + - - + - - + +
    Field NameDescription

    Child Indirect Block #1 AddressO
    +

    Message Location

    -

    This has a value of 0 indicating that the message is stored in - the heap. -

    -
    ...

    Hash Value

    -

    This is the hash value for the message. -

    -

    Child Indirect Block #N-1 AddressO
    +

    Reference Count

    -

    This is the number of times the message is used in the file. -

    -
    Checksum
    + - - + + -

    Fractal Heap ID

    -

    This is an 8-byte fractal heap ID for the message as stored in - the fractal heap for the index. -

    -
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    -
    - -
    -
    -
    - - - - - - - + + +
    - Shared Message Record, for messages stored in an object header -
    bytebytebytebyte (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    + +
    +
    +
    + - - + + - + + - - - + + - + + -
    Message LocationThis space inserted only to align table nicelyField NameDescription
    Hash Value

    Signature

    +

    + The ASCII character string “ + FHIB + ” is used to indicate the beginning of a fractal heap + indirect block. This gives file consistency checking utilities a + better chance of reconstructing a damaged file. +

    +
    ReservedMessage TypeCreation Index

    Version

    +

    This document describes version 0.

    +

    Object Header AddressO

    Heap Header Address

    +

    This is the address for the fractal heap header that this + block belongs to. This field is principally used for file integrity + checking.

    +
    - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    - -
    -
    - - - - + + - - + + - - + + - - - + + - - + + - - + + -
    Field NameDescription

    Block Offset

    +

    + This is the offset of the block within the fractal heap’s + address space (in bytes). The number of bytes used to encode this + field is the Maximum Heap Size (in the heap’s + header) divided by 8 and rounded up to the next highest integer, + for values that are not a multiple of 8. This value is principally + used for file integrity checking. +

    +

    Message Location

    -

    This has a value of 1 indicating that the message is stored in - an object header. -

    -

    Child Direct Block #K Address

    +

    This field is the address of the child direct block. The size + of the [uncompressed] direct block can be computed by its offset in + the heap’s linear address space.

    +

    Hash Value

    -

    This is the hash value for the message. -

    -

    Size of Filtered Direct Block #K

    +

    This is the size of the child direct block after passing + through the I/O filters defined for this heap (in bytes). If no I/O + filters are present for this heap, this field is not present.

    +

    Message Type

    -

    This is the message type in the object header. -

    -

    Filter Mask for Direct Block #K

    +

    + This is the I/O filter mask for the filtered direct block. This + mask has the same format as that used for the filter mask in + chunked raw data records in a v1 B-tree. If + no I/O filters are present for this heap, this field is not + present. +

    +

    Creation Index

    -

    This is the creation index of the message within the object - header. -

    -

    Child Indirect Block #N Address

    +

    This field is the address of the child indirect block. The + size of the indirect block can be computed by its offset in the + heap’s linear address space.

    +

    Object Header Address

    -

    This is the address of the object header where the message is - located. -

    -

    Checksum

    +

    This is the checksum for the indirect block.

    +
    -
    + +
    -
    -
    -

    -IV. Disk Format: Level 2 - Data Objects

    - -

    Data objects contain the “real” user-visible information in the file. - These objects compose the scientific data and other information which - are generally thought of as “data” by the end-user. All the - other information in the file is provided as a framework for - storing and accessing these data objects. -

    - -

    A data object is composed of header and data - information. The header information contains the information - needed to interpret the data information for the object as - well as additional “metadata” or pointers to additional - “metadata” used to describe or annotate each object. -

    - -
    -

    -IV.A. Disk Format: Level 2A - Data Object Headers

    - -

    The header information of an object is designed to encompass - all of the information about an object, except for the data itself. - This information includes the dataspace, the datatype, information - about how the data is stored on disk (in external files, compressed, - broken up in blocks, and so on), as well as other information used - by the library to speed up access to the data objects or maintain - a file’s integrity. Information stored by user applications - as attributes is also stored in the object’s header. The header - of each object is not necessarily located immediately prior to the - object’s data in the file and in fact may be located in any - position in the file. The order of the messages in an object header - is not significant.

    - -

    Object headers are composed of a prefix and a set of messages. The - prefix contains the information needed to interpret the messages and - a small amount of metadata about the object, and the messages contain - the majority of the metadata about the object. -

    - -
    -

    -IV.A.1. Disk Format: Level 2A1 - Data Object Header Prefix

    - -
    -

    -IV.A.1.a. Version 1 Data Object Header Prefix

    - -

    Header messages are aligned on 8-byte boundaries for version 1 - object headers. -

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Version 1 Object Header -
    bytebytebytebyte
    VersionReserved (zero)Total Number of Header Messages
    Object Reference Count
    Object Header Size
    Header Message Type #1Size of Header Message Data #1
    Header Message #1 FlagsReserved (zero)

    Header Message Data #1

    .
    .
    .
    Header Message Type #nSize of Header Message Data #n
    Header Message #n FlagsReserved (zero)

    Header Message Data #n

    -
    +

    An object in the fractal heap is identified by means of a fractal + heap ID, which encodes information to locate the object in the heap. + Currently, the fractal heap stores an object in one of three ways, + depending on the object’s size:

    + +
    + + + + + + + + + + -
    -
    -
    TypeDescription
    Tiny +

    When an object is small enough to be encoded in the heap ID, + the object’s data is embedded in the fractal heap ID itself. + There are 2 sub-types for this type of object: normal and extended. + The sub-type for tiny heap IDs depends on whether the heap ID is + large enough to store objects greater than 16 bytes or not. If the + heap ID length is 18 bytes or smaller, the ‘normal’ + tiny heap ID form is used. If the heap ID length is greater than 18 + bytes in length, the “extended” form is used. See + format description below for both sub-types.

    +
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    -

    This value is used to determine the format of the - information in the object header. When the format of the - object header is changed, the version number - is incremented and can be used to determine how the - information in the object header is formatted. This - is version one (1) (there was no version zero (0)) of the - object header. -

    -

    Total Number of Header Messages

    -

    This value determines the total number of messages listed in - object headers for this object. This value includes the messages - in continuation messages for this object. -

    -

    Object Reference Count

    -

    This value specifies the number of “hard links” to this object - within the current file. References to the object from external - files, “soft links” in this file and object references in this - file are not tracked. -

    -

    Object Header Size

    -

    This value specifies the number of bytes of header message data - following this length field that contain object header messages - for this object header. This value does not include the size of - object header continuation blocks for this object elsewhere in the - file. -

    -

    Header Message #n Type

    -

    This value specifies the type of information included in the - following header message data. The message types for - header messages are defined in sections below. -

    -

    Size of Header Message #n Data

    -

    This value specifies the number of bytes of header - message data following the header message type and length - information for the current message. The size includes - padding bytes to make the message a multiple of eight - bytes. -

    -

    Header Message #n Flags

    -

    This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    BitDescription
    0If set, the message data is constant. This is used - for messages like the datatype message of a dataset. -
    1If set, the message is shared and stored - in another location than the object header. The Header - Message Data field contains a Shared Message - (described in the Data Object Header Messages - section below) - and the Size of Header Message Data field - contains the size of that Shared Message. -
    2If set, the message should not be shared. -
    3If set, the HDF5 decoder should fail to open this object - if it does not understand the message’s type and the file - is open with permissions allowing write access to the file. - (Normally, unknown messages can just be ignored by HDF5 - decoders) -
    4If set, the HDF5 decoder should set bit 5 of this - message’s flags (in other words, this bit field) - if it does not understand the message’s type - and the object is modified in any way. (Normally, - unknown messages can just be ignored by HDF5 - decoders) -
    5If set, this object was modified by software that did not - understand this message. - (Normally, unknown messages should just be ignored by HDF5 - decoders) (Can be used to invalidate an index or a similar - feature) -
    6If set, this message is shareable. -
    7If set, the HDF5 decoder should always fail to open this - object if it does not understand the message’s type (whether - it is open for read-only or read-write access). (Normally, - unknown messages can just be ignored by HDF5 decoders) -

    - -

    Header Message #n Data

    -

    The format and length of this field is determined by the - header message type and size respectively. Some header - message types do not require any data and this information - can be eliminated by setting the length of the message to - zero. The data is padded with enough zeroes to make the - size a multiple of eight. -

    -
    -
    - -
    -

    -IV.A.1.b. Version 2 Data Object Header Prefix

    - -

    Note that the “total number of messages” field has been dropped from - the data object header prefix in this version. The number of messages - in the data object header is just determined by the messages encountered - in all the object header blocks.

    - -

    Note also that the fields and messages in this version of data object - headers have no alignment or padding bytes inserted - they are - stored packed together.

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Version 2 Object Header -
    bytebytebytebyte
    Signature
    VersionFlagsThis space inserted only to align table nicely
    Access time (optional)
    Modification Time (optional)
    Change Time (optional)
    Birth Time (optional)
    Maximum # of compact attributes (optional)Minimum # of dense attributes (optional)
    Size of Chunk #0 (variable size)This space inserted only to align table nicely
    Header Message Type #1Size of Header Message Data #1Header Message #1 Flags
    Header Message #1 Creation Order (optional)This space inserted only to align table nicely

    Header Message Data #1

    .
    .
    .
    Header Message Type #nSize of Header Message Data #nHeader Message #n Flags
    Header Message #n Creation Order (optional)This space inserted only to align table nicely

    Header Message Data #n

    Gap (optional, variable size)
    Checksum
    -
    + + Huge + +

    + When the size of an object is larger than Maximum Size of + Managed Objects in the Fractal Heap Header, the + object’s data is stored on its own in the file and the object + is tracked/indexed via a version 2 B-tree. All huge objects for a + particular fractal heap use the same v2 B-tree. All huge objects + for a particular fractal heap use the same format for their huge + object IDs. +

    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
    Field NameDescription

    Signature

    -

    The ASCII character string “OHDR” - is used to indicate the - beginning of an object header. This gives file consistency - checking utilities a better chance of reconstructing a - damaged file. -

    -

    Version

    -

    This field has a value of 2 indicating version 2 of the object header. -

    -

    Flags

    -

    This field is a bit field indicating additional information - about the object header. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Bit(s)Description
    0-1This two bit field determines the size of the - Size of Chunk #0 field. The values are: +

    Depending on whether the IDs for a heap are large enough to + hold the object’s retrieval information and whether I/O + pipeline filters are applied to the heap’s objects, 4 + sub-types are derived for huge object IDs for this heap:

    + +
    - - + + - - + + + - - + + + - - + + + - - + + -
    ValueDescriptionSub-typeDescription
    0The Size of Chunk #0 field is 1 byte. - Directly accessed, non-filtered +

    The object’s address and length are embedded in the + fractal heap ID itself and the object is directly accessed from + them. This allows the object to be accessed without resorting + to the B-tree.

    +
    1The Size of Chunk #0 field is 2 bytes. - Directly accessed, filtered +

    The filtered object’s address, length, filter mask + and de-filtered size are embedded in the fractal heap ID itself + and the object is accessed directly with them. This allows the + object to be accessed without resorting to the B-tree.

    +
    2The Size of Chunk #0 field is 4 bytes. - Indirectly accessed, non-filtered +

    The object is located by using a B-tree key embedded in + the fractal heap ID to retrieve the address and length from the + version 2 B-tree for huge objects. Then, the address and length + are used to access the object.

    +
    3The Size of Chunk #0 field is 8 bytes. - Indirectly accessed, filtered +

    The object is located by using a B-tree key embedded in + the fractal heap ID to retrieve the filtered object’s + address, length, filter mask and de-filtered size from the + version 2 B-tree for huge objects. Then, this information is + used to access the object.

    +

    -
    2If set, attribute creation order is tracked.
    3If set, attribute creation order is indexed.
    4If set, non-default attribute storage phase change - values are stored.
    5If set, access, modification, change and birth times - are stored.
    6-7Reserved

    - -

    Access Time

    -

    This 32-bit value represents the number of seconds after the - UNIX epoch when the object’s raw data was last accessed - (in other words, read or written). -

    -

    This field is present if bit 5 of flags is set. -

    -

    Modification Time

    -

    This 32-bit value represents the number of seconds after - the UNIX epoch when the object’s raw data was last - modified (in other words, written). -

    -

    This field is present if bit 5 of flags is set. -

    -

    Change Time

    -

    This 32-bit value represents the number of seconds after the - UNIX epoch when the object’s metadata was last changed. -

    -

    This field is present if bit 5 of flags is set. -

    -

    Birth Time

    -

    This 32-bit value represents the number of seconds after the - UNIX epoch when the object was created. -

    -

    This field is present if bit 5 of flags is set. -

    -

    Maximum # of compact attributes

    -

    This is the maximum number of attributes to store in the compact - format before switching to the indexed format. -

    -

    This field is present if bit 4 of flags is set. -

    -

    Minimum # of dense attributes

    -

    This is the minimum number of attributes to store in the indexed - format before switching to the compact format. -

    -

    This field is present if bit 4 of flags is set. -

    -

    Size of Chunk #0

    -

    - This unsigned value specifies the number of bytes of header - message data following this field that contain object header - information. -

    -

    - This value does not include the size of object header - continuation blocks for this object elsewhere in the file. -

    -

    - The length of this field varies depending on bits 0 and 1 of - the flags field. -

    -

    Header Message #n Type

    -

    Same format as version 1 of the object header, described above. -

    -

    Size of Header Message #n Data

    -

    This value specifies the number of bytes of header - message data following the header message type and length - information for the current message. The size of messages - in this version does not include any padding bytes. -

    -

    Header Message #n Flags

    -

    Same format as version 1 of the object header, described above. -

    -

    Header Message #n Creation Order

    -

    This field stores the order that a message of a given type - was created in. -

    -

    This field is present if bit 2 of flags is set. -

    -

    Header Message #n Data

    -

    Same format as version 1 of the object header, described above. -

    -

    Gap

    -

    A gap in an object header chunk is inferred by the end of the - messages for the chunk before the beginning of the chunk’s - checksum. Gaps are always smaller than the size of an - object header message prefix (message type + message size + - message flags). -

    -

    Gaps are formed when a message (typically an attribute message) - in an earlier chunk is deleted and a message from a later - chunk that does not quite fit into the free space is moved - into the earlier chunk. -

    -

    Checksum

    -

    This is the checksum for the object header chunk. -

    -
    +
    + + - - - -

    The header message types and the message data associated with - them compose the critical “metadata” about each object. Some - header messages are required for each object while others are - optional. Some optional header messages may also be repeated - several times in the header itself, the requirements and number - of times allowed in the header will be noted in each header - message description below. -

    - - -
    -

    -IV.A.2. Disk Format: Level 2A2 - Data Object Header Messages

    - -

    Data object header messages are small pieces of metadata that are - stored in the data object header for each object in an HDF5 file. - Data object header messages provide the metadata required to describe - an object and its contents, as well as optional pieces of metadata - that annotate the meaning or purpose of the object. -

    - -

    Data object header messages are either stored directly in the data - object header for the object or are shared between multiple objects - in the file. When a message is shared, a flag in the Message Flags - indicates that the actual Message Data - portion of that message is stored in another location (such as another - data object header, or a heap in the file) and the Message Data - field contains the information needed to locate the actual information - for the message. -

    - -

    - The format of shared message data is described here:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - -
    - Shared Message (Version 1) -
    bytebytebytebyte
    VersionTypeReserved (zero)
    Reserved (zero)

    AddressO

    - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    + Managed + +

    When the size of an object does not meet the above two + conditions, the object is stored and managed via the direct and + indirect blocks based on the doubling table.

    + + + +
    - -
    -
    - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number is used when there are changes in the format - of a shared object message and is described here: - - - - - - - - - - - - - - - -
    VersionDescription
    0Never used.
    1Used by the library before version 1.6.1. -

    -

    Type

    The type of shared message location: - - - - - - - - - - -
    ValueDescription
    0Message stored in another object’s header (a committed - message). -

    -

    Address

    The address of the object header - containing the message to be shared.

    -
    -
    +

    The specific format for each type of heap ID is described below: +

    -
    -
    -
    - - - - - - - - - - - - - - - - - - - -
    - Shared Message (Version 2) -
    bytebytebytebyte
    VersionTypeThis space inserted only to align table nicely

    AddressO

    +
    + + -
    Fractal Heap ID for Tiny Objects (sub-type 1 - + ‘Normal’)
    - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    -
    + byte + byte + byte + byte + -
    -
    - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number is used when there are changes in the format - of a shared object message and is described here: - - - - - - - - - - -
    VersionDescription
    2Used by the library of version 1.6.1 and after. -

    -

    Type

    The type of shared message location: - - - - - - - - - - -
    ValueDescription
    0Message stored in another object’s header (a committed - message). -

    -

    Address

    The address of the object header - containing the message to be shared.

    -
    + + Version, Type & Length + This space inserted + only to align table nicely + -
    -
    -
    - - - - - - - - - - - - - - - - - - - -
    - Shared Message (Version 3) -
    bytebytebytebyte
    VersionTypeThis space inserted only to align table nicely
    Location (variable size)
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number indicates changes in the format of shared - object message and is described here: - - - - - - - - - - -
    VersionDescription
    3Used by the library of version 1.8 and after. In this - version, the Type field can indicate that - the message is stored in the fractal heap. -

    -

    Type

    The type of shared message location: - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0Message is not shared and is not shareable. -
    1Message stored in file’s shared object header message - heap (a shared message). -
    2Message stored in another object’s header (a committed - message). -
    3Message stored is not shared, but is shareable. -

    -

    Location

    This field contains either a Size of Offsets-bytes - address of the object header - containing the message to be shared, or an 8-byte fractal heap ID - for the message in the file’s shared object header message - heap. -

    -
    -
    - - -

    The following is a list of currently defined header messages: -

    - -
    -

    IV.A.2.a. The NIL Message

    - - -
    - - - - - - - - -
    Header Message Name: NIL
    Header Message Type: 0x0000
    Length: Varies
    Status: Optional; may be repeated.
    Description:The NIL message is used to indicate a message which is to be - ignored when reading the header messages for a data object. - [Possibly one which has been deleted for some reason.] -
    Format of Data: Unspecified
    - + +
    Data (variable size) + + +

    -

    IV.A.2.b. The Dataspace Message

    - - -
    - - - - - - - - - - -
    Header Message Name: Dataspace
    Header Message Type: 0x0001
    Length: Varies according to the number of - dimensions, as described in the following table.
    Status: Required for dataset objects; - may not be repeated.
    Description:The dataspace message describes the number of dimensions (in - other words, “rank”) and size of each dimension that - the data object has. This message is only used for datasets which - have a simple, rectilinear, array-like layout; datasets requiring - a more complex layout are not yet supported. -
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Dataspace Message - Version 1 -
    bytebytebytebyte
    VersionDimensionalityFlagsReserved
    Reserved

    Dimension #1 SizeL

    .
    .
    .

    Dimension #n SizeL


    Dimension #1 Maximum SizeL (optional)

    .
    .
    .

    Dimension #n Maximum SizeL (optional)


    Permutation Index #1L (optional)

    .
    .
    .

    Permutation Index #nL (optional)

    - - +
    +
    - - -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    -

    This value is used to determine the format of the - Dataspace Message. When the format of the - information in the message is changed, the version number - is incremented and can be used to determine how the - information in the object header is formatted. This - document describes version one (1) (there was no version - zero (0)). -

    -

    Dimensionality

    -

    This value is the number of dimensions that the data - object has. -

    -

    Flags

    -

    This field is used to store flags to indicate the - presence of parts of this message. Bit 0 (the least - significant bit) is used to indicate that maximum - dimensions are present. Bit 1 is used to indicate that - permutation indices are present. -

    -

    Dimension #n Size

    -

    This value is the current size of the dimension of the - data as stored in the file. The first dimension stored in - the list of dimensions is the slowest changing dimension - and the last dimension stored is the fastest changing - dimension. -

    -

    Dimension #n Maximum Size

    -

    This value is the maximum size of the dimension of the - data as stored in the file. This value may be the special - “unlimited” size which indicates - that the data may expand along this dimension indefinitely. - If these values are not stored, the maximum size of each - dimension is assumed to be the dimension’s current size. -

    -

    Permutation Index #n

    -

    This value is the index permutation used to map - each dimension from the canonical representation to an - alternate axis for each dimension. If these values are - not stored, the first dimension stored in the list of - dimensions is the slowest changing dimension and the last - dimension stored is the fastest changing dimension. -

    -
    -
    - - - -
    -

    Version 2 of the dataspace message dropped the optional - permutation index value support, as it was never implemented in the - HDF5 Library:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Dataspace Message - Version 2 -
    bytebytebytebyte
    VersionDimensionalityFlagsType

    Dimension #1 SizeL

    .
    .
    .

    Dimension #n SizeL


    Dimension #1 Maximum SizeL (optional)

    .
    .
    .

    Dimension #n Maximum SizeL (optional)

    + Field Name + Description + - - - -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    -
    +

    Version, Type & Length

    + +

    This is a bit field with the following definition:

    + + + + + -
    -
    -
    BitDescription
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + +
    Field NameDescription

    Version

    -

    This value is used to determine the format of the - Dataspace Message. This field should be ‘2’ for version 2 - format messages. -

    -

    Dimensionality

    -

    This value is the number of dimensions that the data object has. -

    -

    Flags

    -

    This field is used to store flags to indicate the - presence of parts of this message. Bit 0 (the least - significant bit) is used to indicate that maximum - dimensions are present. -

    -

    Type

    -

    This field indicates the type of the dataspace: - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0A scalar dataspace; in other words, - a dataspace with a single, dimensionless element. -
    1A simple dataspace; in other words, - a dataspace with a rank > 0 and an appropriate # of - dimensions. -
    2A null dataspace; in other words, - a dataspace with no elements. -

    -

    Dimension #n Size

    -

    This value is the current size of the dimension of the - data as stored in the file. The first dimension stored in - the list of dimensions is the slowest changing dimension - and the last dimension stored is the fastest changing - dimension. -

    -

    Dimension #n Maximum Size

    -

    This value is the maximum size of the dimension of the - data as stored in the file. This value may be the special - “unlimited” size which indicates - that the data may expand along this dimension indefinitely. - If these values are not stored, the maximum size of each - dimension is assumed to be the dimension’s current size. -

    -
    6-7The current version of ID format. This document describes + version 0.
    4-5The ID type. Tiny objects have a value of 2. +
    0-3The length of the tiny object. The value stored is one + less than the actual length (since zero-length objects are not + allowed to be stored in the heap). For example, an object of + actual length 1 has an encoded length of 0, an object of actual + length 2 has an encoded length of 1, and so on.
    +

    - - + + + +

    Data

    + +

    This is the data for the object.

    + + + + - -
    -

    IV.A.2.c. The Link Info Message

    - - -
    - - - - - - - - -
    Header Message Name: Link Info
    Header Message Type: 0x002
    Length: Varies
    Status: Optional; may not be - repeated.
    Description:The link info message tracks variable information about the - current state of the links for a “new style” - group’s behavior. Variable information will be stored in - this message and constant information will be stored in the - Group Info message. -
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
    +
    - Link Info -
    bytebytebytebyte
    VersionFlagsThis space inserted only to align table nicely

    Maximum Creation Index (8 bytes, optional)


    Fractal Heap AddressO


    Address of v2 B-tree for Name IndexO


    Address of v2 B-tree for Creation Order IndexO (optional)

    + -
    Fractal Heap ID for Tiny Objects (sub-type 2 - + ‘Extended’)
    - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    -
    + byte + byte + byte + byte + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + -
    Field NameDescription

    Version

    -

    The version number for this message. This document describes - version 0.

    -

    Flags

    This field determines various optional aspects of the link - info message: - - - - - - - - - - - - - - - - - - - -
    BitDescription
    0If set, creation order for the links is tracked. -
    1If set, creation order for the links is indexed. -
    2-7Reserved

    - -

    Maximum Creation Index

    This 64-bit value is the maximum creation order index value - stored for a link in this group.

    -

    This field is present if bit 0 of flags is set.

    -

    Fractal Heap Address

    -

    - This is the address of the fractal heap to store dense links. - Each link stored in the fractal heap is stored as a - Link Message. -

    -

    - If there are no links in the group, or the group’s links - are stored “compactly” (as object header messages), this - value will be the undefined - address. -

    -

    Address of v2 B-tree for Name Index

    This is the address of the version 2 B-tree to index names of links.

    -

    If there are no links in the group, or the group’s links - are stored “compactly” (as object header messages), this - value will be the undefined - address. -

    -

    Address of v2 B-tree for Creation Order Index

    This is the address of the version 2 B-tree to index creation order of links.

    -

    If there are no links in the group, or the group’s links - are stored “compactly” (as object header messages), this - value will be the undefined - address. -

    -

    This field exists if bit 1 of flags is set.

    -
    Version, Type & LengthExtended LengthThis space inserted + only to align table nicely
    -
    + + Data (variable size) + + +
    -

    IV.A.2.d. The Datatype Message

    - - -
    - - - - - - - - -
    Header Message Name: Datatype
    Header Message Type: 0x0003 -
    Length: Variable
    Status: Required for dataset or committed - datatype (formerly named datatype) objects; may not be repeated. -
    Description:

    The datatype message defines the datatype for each element - of a dataset or a common datatype for sharing between multiple - datasets. A datatype can describe an atomic type like a fixed- - or floating-point type or more complex types like a C struct - (compound datatype), array (array datatype) or C++ vector - (variable-length datatype).

    -

    Datatype messages that are part of a dataset object do not - describe how elements are related to one another; the dataspace - message is used for that purpose. Datatype messages that are part of - a committed datatype (formerly named datatype) message describe - a common datatype that can be shared by multiple datasets in the - file.

    -
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - -
    - Datatype Message -
    bytebytebytebyte
    Class and VersionClass Bit Field, Bits 0-7Class Bit Field, Bits 8-15Class Bit Field, Bits 16-23
    Size


    Properties


    -
    +
    + + + + + -
    -
    -
    Field NameDescription
    - - - - - - - - + +
    Field NameDescription

    Class and Version

    -

    The version of the datatype message and the datatype’s class - information are packed together in this field. The version - number is packed in the top 4 bits of the field and the class - is contained in the bottom 4 bits. -

    -

    The version number information is used for changes in the - format of the datatype message and is described here: +

    Version, Type & Length

    +

    This is a bit field with the following definition:

    - - + + - - - - - - + + - - + + - - + + -
    VersionDescriptionBitDescription
    0Never used -
    1Used by early versions of the library to encode - compound datatypes with explicit array fields. - See the compound datatype description below for - further details. - 6-7The current version of ID format. This document describes + version 0.
    2Used when an array datatype needs to be encoded. - 4-5The ID type. Tiny objects have a value of 2. +
    3Used when a VAX byte-ordered type needs to be - encoded. Packs various other datatype classes more - efficiently also. - 0-3These 4 bits, together with the next byte, form an + unsigned 12-bit integer for holding the length of the object. + These 4-bits are bits 8-11 of the 12-bit integer. See description + for the Extended Length field below. +

    +
    +

    -

    The class of the datatype determines the format for the class - bit field and properties portion of the datatype message, which - are described below. The - following classes are currently defined: + + - - - - - + + + + - - - - + + + + - - - - +
    ValueDescription

    Extended Length

    +

    This byte, together with the 4 bits in the previous byte, + forms an unsigned 12-bit integer for holding the length of the tiny + object. These 8 bits are bits 0-7 of the 12-bit integer formed. The + value stored is one less than the actual length (since zero-length + objects are not allowed to be stored in the heap). For example, an + object of actual length 1 has an encoded length of 0, an object of + actual length 2 has an encoded length of 1, and so on.

    +
    0Fixed-Point

    Data

    +

    This is the data for the object.

    +
    1Floating-Point
    +

    - - 2 - Time - - - 3 - String - +
    +
    +
    + + - - - - + + + + + + - - - - + + + + - - - - + + + + +
    Fractal Heap ID for Huge Objects (sub-type 1 & 2): + indirectly accessed, non-filtered/filtered
    4Bit field
    bytebytebytebyte
    5Opaque
    Version & TypeThis space inserted + only to align table nicely
    6Compound

    v2 B-tree KeyL + (variable size)
    +
    + + + + + + +
     (Items marked with an ‘L’ in the + above table are of the size specified in “Size of + Lengths” field in the superblock.)
    +
    + +
    +
    + + + + + + + +
    Field NameDescription

    Version & Type

    +

    This is a bit field with the following definition:

    + - - + + - - + + - - - + + - - - + + -
    7ReferenceBitDescription
    8Enumerated6-7The current version of ID format. This document describes + version 0.
    9Variable-Length4-5The ID type. Huge objects have a value of 1. +
    10Array0-3Reserved.

    +
    +

    + + + - - + +

    v2 B-tree Key

    +

    + This field is the B-tree key for retrieving the information from + the version 2 B-tree for huge objects needed to access the object. + See the description of v2 B-tree records + sub-type 1 & 2 for a description of the fields. New key values are + derived from Next Huge Object ID in the Fractal + Heap Header. +

    + - -

    Class Bit Fields

    - -

    The information in these bit fields is specific to each datatype - class and is described below. All bits not defined for a - datatype class are set to zero. -

    - - + +
    - -

    Size

    - -

    The size of a datatype element in bytes. -

    - - +
    +
    +
    + + - - - - + + + + + + -
    Fractal Heap ID for Huge Objects (sub-type 3): + directly accessed, non-filtered

    Properties

    -

    This variable-sized sequence of bytes encodes information - specific to each datatype class and is described for each class - below. If there is no property information specified for a - datatype class, the size of this field is zero bytes. -

    -
    bytebytebytebyte
    -
    + + Version & Type + This space inserted + only to align table nicely + + +
    Address O
    +
    + -
    -

    Class specific information for Fixed-Point Numbers (Class 0):

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Fixed-point Bit Field Description -
    BitsMeaning

    0

    Byte Order. If zero, byte order is little-endian; - otherwise, byte order is big-endian.

    1, 2

    Padding type. Bit 1 is the lo_pad bit and bit 2 - is the hi_pad bit. If a datum has unused bits at either - end, then the lo_pad or hi_pad bit is copied to those - locations.

    3

    Signed. If this bit is set then the fixed-point - number is in 2’s complement form.

    4-23

    Reserved (zero).

    -
    + +
    Length L
    +
    + -
    -
    - - - - - - - - - - - - - - -
    - Fixed-Point Property Description -
    ByteByteByteByte
    Bit OffsetBit Precision
    -
    + -
    -
    - - - - - - - - - - - - - - - +
    Field NameDescription

    Bit Offset

    -

    The bit offset of the first significant bit of the fixed-point - value within the datatype. The bit offset specifies the number - of bits “to the right of” the value (which are set to the - lo_pad bit value). -

    -

    Bit Precision

    -

    The number of bits of precision of the fixed-point value - within the datatype. This value, combined with the datatype - element’s size and the Bit Offset field specifies the number - of bits “to the left of” the value (which are set to the - hi_pad bit value). -

    -
    + + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    - -
    + +
    +
    + + + + + -
    -

    Class specific information for Floating-Point Numbers (Class 1):

    - -
    -
    Field NameDescription
    - - - - - - - - - - - - - - - - - - - - + +
    - Floating-Point Bit Field Description -
    BitsMeaning

    0, 6

    Byte Order. These two non-contiguous bits specify the - “endianness” of the bytes in the datatype element. - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Bit 6Bit 0Description
    00Byte order is little-endian -
    01Byte order is big-endian -
    10Reserved -
    11Byte order is VAX-endian -

    -

    1, 2, 3

    Padding type. Bit 1 is the low bits pad type, bit 2 - is the high bits pad type, and bit 3 is the internal bits - pad type. If a datum has unused bits at either end or between - the sign bit, exponent, or mantissa, then the value of bit - 1, 2, or 3 is copied to those locations.

    4-5

    Mantissa Normalization. This 2-bit bit field specifies - how the most significant bit of the mantissa is managed. +

    Version & Type

    +

    This is a bit field with the following definition:

    - - + + - - + + - - + + - - + + - - - - -
    ValueDescriptionBitDescription
    0No normalization - 6-7The current version of ID format. This document describes + version 0.
    1The most significant bit of the mantissa is always set - (except for 0.0). - 4-5The ID type. Huge objects have a value of 1. +
    2The most significant bit of the mantissa is not stored, - but is implied to be set. - 0-3Reserved.
    3Reserved. -

    +
    +

    + - + - -

    7

    -

    Reserved (zero).

    - + +

    Address

    +

    This field is the address of the object in the file.

    + + - -

    8-15

    -

    Sign Location. This is the bit position of the sign - bit. Bits are numbered with the least significant bit zero.

    - + +

    Length

    +

    This field is the length of the object in the file.

    + + +
    - -

    16-23

    -

    Reserved (zero).

    - +
    +
    +
    + + -
    Fractal Heap ID for Huge Objects (sub-type 4): + directly accessed, filtered
    -
    + + byte + byte + byte + byte + -
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Floating-Point Property Description -
    ByteByteByteByte
    Bit OffsetBit Precision
    Exponent LocationExponent SizeMantissa LocationMantissa Size
    Exponent Bias
    -
    + + Version & Type + This space inserted + only to align table nicely + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + -
    Field NameDescription

    Bit Offset

    -

    The bit offset of the first significant bit of the floating-point - value within the datatype. The bit offset specifies the number - of bits “to the right of” the value. -

    -

    Bit Precision

    -

    The number of bits of precision of the floating-point value - within the datatype. -

    -

    Exponent Location

    -

    The bit position of the exponent field. Bits are numbered with - the least significant bit number zero. -

    -

    Exponent Size

    -

    The size of the exponent field in bits. -

    -

    Mantissa Location

    -

    The bit position of the mantissa field. Bits are numbered with - the least significant bit number zero. -

    -

    Mantissa Size

    -

    The size of the mantissa field in bits. -

    -

    Exponent Bias

    -

    The bias of the exponent field. -

    -

    Address O
    +
    -
    + +
    Length L
    +
    + + + Filter Mask + -
    -

    Class specific information for Time (Class 2):

    - - -
    - - - - - - - - - - - - - - - - - -
    - Time Bit Field Description -
    BitsMeaning

    0

    Byte Order. If zero, byte order is little-endian; - otherwise, byte order is big endian.

    1-23

    Reserved (zero).

    -
    + +
    De-filtered Size L
    +
    + -
    -
    - - - - - - - - - - - -
    - Time Property Description -
    ByteByte
    Bit Precision
    -
    + -
    -
    - - - - - - - - - - +
    Field NameDescription

    Bit Precision

    -

    The number of bits of precision of the time value. -

    -
    + + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    - -
    + +
    +
    + + + + + -
    -

    Class specific information for Strings (Class 3):

    - - -
    -
    Field NameDescription
    - - - - - - - - - - - - - - - - - - - - - -
    - String Bit Field Description -
    BitsMeaning

    0-3

    Padding type. This four-bit value determines the - type of padding to use for the string. The values are: - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0Null Terminate: A zero byte marks the end of the - string and is guaranteed to be present after - converting a long string to a short string. When - converting a short string to a long string the value is - padded with additional null characters as necessary. -
    1Null Pad: Null characters are added to the end of - the value during conversions from short values to long - values but conversion in the opposite direction simply - truncates the value. -
    2Space Pad: Space characters are added to the end of - the value during conversions from short values to long - values but conversion in the opposite direction simply - truncates the value. This is the Fortran - representation of the string. -
    3-15Reserved -

    -

    4-7

    Character Set. The character set used to - encode the string. - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0ASCII character set encoding -
    1UTF-8 character set encoding -
    2-15Reserved -

    -

    8-23

    Reserved (zero).

    -
    - -

    There are no properties defined for the string class. -

    - - -

    Class specific information for bit fields (Class 4):

    - -
    - - - - - - - - - - - - - - - - - - - - - - -
    - Bitfield Bit Field Description -
    BitsMeaning

    0

    Byte Order. If zero, byte order is little-endian; - otherwise, byte order is big endian.

    1, 2

    Padding type. Bit 1 is the lo_pad type and bit 2 - is the hi_pad type. If a datum has unused bits at either - end, then the lo_pad or hi_pad bit is copied to those - locations.

    3-23

    Reserved (zero).

    -
    + +

    Version & Type

    + +

    This is a bit field with the following definition:

    + + + + + -
    -
    -
    BitDescription
    - - - - - - - - - - - - - -
    - Bit Field Property Description -
    ByteByteByteByte
    Bit OffsetBit Precision
    - + + 6-7 + The current version of ID format. This document describes + version 0. + + + 4-5 + The ID type. Huge objects have a value of 1. + + + + 0-3 + Reserved. + + +

    -
    -
    - - - - - - - - - - - - - - - -
    Field NameDescription

    Bit Offset

    -

    The bit offset of the first significant bit of the bit field - within the datatype. The bit offset specifies the number - of bits “to the right of” the value. -

    -

    Bit Precision

    -

    The number of bits of precision of the bit field - within the datatype. -

    -
    -
    + + + +

    Address

    +

    This field is the address of the filtered object in + the file.

    + -
    -

    Class specific information for Opaque (Class 5):

    - -
    - - - - - - - - - - - - - - - - - -
    - Opaque Bit Field Description -
    BitsMeaning

    0-7

    Length of ASCII tag in bytes.

    8-23

    Reserved (zero).

    -
    + +

    Length

    +

    This field is the length of the filtered object in + the file.

    + -
    -
    - - - - - - - - - - - - - -
    - Opaque Property Description -
    ByteByteByteByte

    ASCII Tag
    -
    -
    + +

    Filter Mask

    +

    This field is the I/O pipeline filter mask for the + filtered object in the file.

    + -
    -
    - - - - - - - - - - -
    Field NameDescription

    ASCII Tag

    -

    This NUL-terminated string provides a description for the - opaque type. It is NUL-padded to a multiple of 8 bytes. -

    -
    -
    + +

    De-filtered Size

    +

    This field is the size of the de-filtered object in + the file.

    + + + -
    -

    Class specific information for Compound (Class 6):

    - -
    - - - - - - - - - - - - - - - - - -
    - Compound Bit Field Description -
    BitsMeaning

    0-15

    Number of Members. This field contains the number - of members defined for the compound datatype. The member - definitions are listed in the Properties field of the data - type message.

    16-23

    Reserved (zero).

    -
    +
    +
    +
    + + + + + + + + -

    The Properties field of a compound datatype is a list of the - member definitions of the compound datatype. The member - definitions appear one after another with no intervening bytes. - The member types are described with a (recursively) encoded datatype - message.

    + + + + + + + -

    Note that the property descriptions are different for different - versions of the datatype version. Additionally note that the version - 0 datatype encoding is deprecated and has been replaced with later - encodings in versions of the HDF5 Library from the 1.4 release - onward.

    + + + +
    Fractal Heap ID for Managed Objects
    bytebytebytebyte
    Version & TypeThis space inserted + only to align table nicely
    Offset (variable size)
    Length (variable size)
    +
    +
    +
    + + + + + -
    -
    Field NameDescription
    - + + + + - - - + + + + - - - + + + + +
    - Compound Properties Description for Datatype Version 1 -

    Version & Type

    This is a bit field with the following definition:

    + + + + + - - - - - - + + + + + + + + + + + + +
    BitDescription
    ByteByteByteByte
    6-7The current version of ID format. This document describes + version 0.
    4-5The ID type. Managed objects have a value of 0. +
    0-3Reserved.
    +


    Name

    Offset

    + This field is the offset of the object in the heap. This + field’s size is the minimum number of bytes necessary to + encode the Maximum Heap Size value (from the Fractal + Heap Header). For example, if the value of the Maximum + Heap Size is less than 256 bytes, this field is 1 byte in length, + a Maximum Heap Size of 256-65535 bytes uses a 2 byte + length, and so on. +

    Byte Offset of Member

    Length

    + This field is the length of the object in the heap. It is + determined by taking the minimum value of Maximum Direct + Block Size and Maximum Size of Managed Objects in the Fractal + Heap Header. Again, the minimum number of bytes needed to encode + that value is used for the size of this field. +

    +
    - - Dimensionality - Reserved (zero) - +
    +

    + III.G. Disk Format: Level 1G - + Free-space Manager +

    - - Dimension Permutation - +

    Free-space managers are used to describe space within a heap or + the entire HDF5 file that is not currently used for that heap or file. +

    - - Reserved (zero) - +

    + The free-space manager header contains metadata information + about the space being tracked, along with the address of the list of free + space sections which actually describes the free space. The header + records information about free-space sections being tracked, creation + parameters for handling free-space sections of a client, and section + information used to locate the collection of free-space sections. +

    - - Dimension #1 Size (required) - +

    + The free-space section list stores a collection of free-space + sections that is specific to each client of the free-space + manager. For example, the fractal heap is a client of the free space + manager and uses it to track unused space within the heap. There are 4 + types of section records for the fractal heap, each of which has its + own format, listed below. +

    - - Dimension #2 Size (required) - +
    + + - - - + + + + + + - - - + + + - - - + + + + + -
    Free-space Manager Header
    Dimension #3 Size (required)
    bytebytebytebyte
    Dimension #4 Size (required)
    Signature

    Member Type Message

    VersionClient IDThis space inserted + only to align table nicely
    -
    + +
    Total Space TrackedL
    +
    + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + -
    Field NameDescription

    Name

    -

    This NUL-terminated string provides the name of the - compound datatype member. It is NUL-padded to a multiple of 8 bytes. -

    -

    Byte Offset of Member

    -

    This is the byte offset of the member within the datatype. -

    -

    Dimensionality

    -

    If set to zero, this field indicates a scalar member. If set - to a value greater than zero, this field indicates that the - member is an array of values. For array members, the size of - the array is indicated by the ‘Size of Dimension n’ field in - this message. -

    -

    Dimension Permutation

    -

    This field was intended to allow an array field to have - its dimensions permuted, but this was never implemented. - This field should always be set to zero. -

    -

    Dimension #n Size

    -

    This field is the size of a dimension of the array field as - stored in the file. The first dimension stored in the list of - dimensions is the slowest changing dimension and the last - dimension stored is the fastest changing dimension. -

    -

    Member Type Message

    -

    This field is a datatype message describing the datatype of - the member. -

    -

    Total Number of SectionsL
    +
    -
    + +
    Number of Serialized SectionsL
    +
    + -
    -
    -
    - - - - - - - - - - - - - - - - - - - - - + + + -
    - Compound Properties Description for Datatype Version 2 -
    ByteByteByteByte

    Name

    Byte Offset of Member

    Member Type Message


    Number of Un-Serialized SectionsL
    +
    -
    + + Number of Section Classes + This space inserted + only to align table nicely + -
    -
    - - - - - - - - - - - - - - - - - - - - + + + + -
    Field NameDescription

    Name

    -

    This NUL-terminated string provides the name of the - compound datatype member. It is NUL-padded to a multiple of 8 bytes. -

    -

    Byte Offset of Member

    -

    This is the byte offset of the member within the datatype. -

    -

    Member Type Message

    -

    This field is a datatype message describing the datatype of - the member. -

    -
    Shrink PercentExpand Percent
    -
    + + Size of Address Space + This space inserted + only to align table nicely + + +
    Maximum Section Size L
    +
    + -
    -
    -
    - - - - - - - - - - - - - - - - - - - - - + + + -
    - Compound Properties Description for Datatype Version 3 -
    ByteByteByteByte

    Name

    Byte Offset of Member (variable size)

    Member Type Message


    Address of Serialized Section ListO
    +
    -
    + +
    Size of Serialized Section List UsedL
    +
    + -
    -
    - - - - - - - - - - - - - - - - - - - - + + + -
    Field NameDescription

    Name

    This NUL-terminated string provides the name of the - compound datatype member. It is not NUL-padded to a multiple of 8 - bytes.

    Byte Offset of Member

    This is the byte offset of the member within the datatype. - The field size is the minimum number of bytes necessary, - based on the size of the datatype element. For example, a - datatype element size of less than 256 bytes uses a 1 byte - length, a datatype element size of 256-65535 bytes uses a - 2 byte length, and so on.

    Member Type Message

    This field is a datatype message describing the datatype of - the member.


    Allocated Size of Serialized Section ListL
    +
    -
    + + Checksum + + + + + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    -
    -

    Class specific information for Reference (Class 7):

    - -
    - - - - - - - - - - - - - - - - - -
    - Reference Bit Field Description -
    BitsMeaning

    0-3

    Type. This four-bit value contains the type of reference - described. The values defined are: - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0Object Reference: A reference to another object in this - HDF5 file. -
    1Dataset Region Reference: A reference to a region within - a dataset in this HDF5 file. -
    2-15Reserved -

    - -

    4-23

    Reserved (zero).

    -
    + -

    There are no properties defined for the reference class. -

    +
    +
    + + + + + + + + + -
    -

    Class specific information for Enumeration (Class 8):

    - -
    -
    Field NameDescription

    Signature

    +

    + The ASCII character string “ + FSHD + ” is used to indicate the beginning of the Free-space Manager + Header. This gives file consistency checking utilities a better + chance of reconstructing a damaged file. +

    +
    - - - - - - - - - - - - - - - - -
    - Enumeration Bit Field Description -
    BitsMeaning

    0-15

    Number of Members. The number of name/value - pairs defined for the enumeration type.

    16-23

    Reserved (zero).

    -
    + +

    Version

    + +

    This is the version number for the Free-space Manager Header + and this document describes version 0.

    + + -
    -
    -
    - - + + + - - + + - - - + + + + - - + + + -
    - Enumeration Property Description for Datatype Versions 1 & 2 -

    Client ID

    +

    This is the client ID for identifying the user of this + free-space manager:

    + + + + + - - - - - - + + + + + + + + + + + + +
    IDDescription
    ByteByteByteByte
    0Fractal heap
    1File
    2+Reserved.
    +

    -

    Base Type


    Names

    Total Space Tracked

    +

    This is the total amount of free space being tracked, in + bytes.

    +

    Values

    Total Number of Sections

    +

    This is the total number of free-space sections being + tracked.

    +
    -
    + +

    Number of Serialized Sections

    + +

    This is the number of serialized free-space sections being + tracked.

    + + + +

    Number of Un-Serialized Sections

    + +

    This is the number of un-serialized free-space sections being + managed. Un-serialized sections are created by the free-space + client when the list of sections is read in.

    + + -
    -
    - - - - - - - - - - - - - - - - - - - - + + + + -
    Field NameDescription

    Base Type

    -

    Each enumeration type is based on some parent type, usually an - integer. The information for that parent type is described - recursively by this field. -

    -

    Names

    -

    The name for each name/value pair. Each name is stored as a null - terminated ASCII string in a multiple of eight bytes. The names - are in no particular order. -

    -

    Values

    -

    The list of values in the same order as the names. The values - are packed (no inter-value padding) and the size of each value - is determined by the parent type. -

    -

    Number of Section Classes

    +

    This is the number of section classes handled by this free + space manager for the free-space client.

    +
    -
    + +

    Shrink Percent

    + +

    This is the percent of current size to shrink the allocated + serialized free-space section list.

    + + -
    -
    -
    - - + + + + - - - - - - + + + + - - - + + + + - - - + + + + - - + + + -
    - Enumeration Property Description for Datatype Version 3 -

    Expand Percent

    +

    This is the percent of current size to expand the allocated + serialized free-space section list.

    +
    ByteByteByteByte

    Size of Address Space

    +

    + This is the size of the address space that free-space sections are + within. This is stored as the log2 of the actual value + (in other words, the number of bits required to store values within + that address space). +

    +

    Base Type

    Maximum Section Size

    +

    This is the maximum size of a section to be tracked.

    +

    Names

    Address of Serialized Section List

    +

    This is the address where the serialized free-space section + list is stored.

    +

    Values

    Size of Serialized Section List Used

    +

    + This is the size of the serialized free-space section list used (in + bytes). This value must be less than or equal to the allocated + size of serialized section list, below. +

    +
    -
    + +

    Allocated Size of Serialized Section List

    + +

    This is the size of serialized free-space section list + actually allocated (in bytes).

    + + -
    -
    - - - - - - - - - - - - - - - - - - - - + + + + -
    Field NameDescription

    Base Type

    -

    Each enumeration type is based on some parent type, usually an - integer. The information for that parent type is described - recursively by this field. -

    -

    Names

    -

    The name for each name/value pair. Each name is stored as a null - terminated ASCII string, not padded to a multiple of - eight bytes. The names are in no particular order. -

    -

    Values

    -

    The list of values in the same order as the names. The values - are packed (no inter-value padding) and the size of each value - is determined by the parent type. -

    -

    Checksum

    +

    This is the checksum for the free-space manager header.

    +
    -
    + + +
    +

    + The free-space sections being managed are stored in a free-space + section list, described below. The sections in the free-space section + list are stored in the following way: a count of the number of sections + describing a particular size of free space and the size of the + free-space described (in bytes), followed by a list of section + description records; then another section count and size, followed by + the list of section descriptions for that size; and so on. +

    -
    -

    Class specific information for Variable-Length (Class 9):

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Variable-Length Bit Field Description -
    BitsMeaning

    0-3

    Type. This four-bit value contains the type of - variable-length datatype described. The values defined are: - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0Sequence: A variable-length sequence of any datatype. - Variable-length sequences do not have padding or - character set information. -
    1String: A variable-length sequence of characters. - Variable-length strings have padding and character set - information. -
    2-15Reserved -

    - -

    4-7

    Padding type. (variable-length string only) - This four-bit value determines the type of padding - used for variable-length strings. The values are the same - as for the string padding type, as follows: - - - - - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0Null terminate: A zero byte marks the end of a string - and is guaranteed to be present after converting a long - string to a short string. When converting a short string - to a long string, the value is padded with additional null - characters as necessary. -
    1Null pad: Null characters are added to the end of the - value during conversion from a short string to a longer - string. Conversion from a long string to a shorter string - simply truncates the value. -
    2Space pad: Space characters are added to the end of the - value during conversion from a short string to a longer - string. Conversion from a long string to a shorter string - simply truncates the value. This is the Fortran - representation of the string. -
    3-15Reserved -

    - -

    This value is set to zero for variable-length sequences.

    - -

    8-11

    Character Set. (variable-length string only) - This four-bit value specifies the character set - to be used for encoding the string: - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0ASCII character set encoding -
    1UTF-8 character set encoding -
    2-15Reserved -

    - -

    This value is set to zero for variable-length sequences.

    - -

    12-23

    Reserved (zero).

    -
    +
    + + -
    -
    -
    -
    Free-space Section List
    - + + + + + + - - - - + - + + -
    - Variable-Length Property Description -
    bytebytebytebyte
    ByteByteByteByteSignature

    Base Type

    VersionThis space inserted + only to align table nicely
    -
    + +
    Free-space Manager Header AddressO
    +
    + -
    -
    - - - - - - - - - - + + + -
    Field NameDescription

    Base Type

    -

    Each variable-length type is based on some parent type. The - information for that parent type is described recursively by - this field. -

    -
    Number of Section Records in Set #0 (variable + size)
    -
    + + Size of Free-space Section Described in Record + Set #0 (variable size) + + + + + Record Set #0 Section Record #0 Offset(variable + size) + + + Record Set #0 Section Record #0 Type + This space inserted + only to align table nicely + -
    -

    Class specific information for Array (Class 10):

    + + Record Set #0 Section Record #0 Data (variable + size) + -

    There are no bit fields defined for the array class. -

    + + ... + -

    Note that the dimension information defined in the property for this - datatype class is independent of dataspace information for a dataset. - The dimension information here describes the dimensionality of the - information within a data element (or a component of an element, if the - array datatype is nested within another datatype) and the dataspace for a - dataset describes the size and locations of the elements in a dataset. -

    + + Record Set #0 Section Record #K-1 Offset(variable + size) + + + Record Set #0 Section Record #K-1 Type + This space inserted + only to align table nicely + -
    - - + + + - - - - + - - - - + + + - - - - - - - - - + + + - - - - - - - - - + + + + - + -
    - Array Property Description for Datatype Version 2 -
    Record Set #0 Section Record #K-1 Data (variable + size)
    ByteByteByteByteNumber of Section Records in Set #1 (variable + size)
    DimensionalityReserved (zero)
    Size of Free-space Section Described in Record + Set #1 (variable size) +
    Dimension #1 Size
    .
    .
    .
    Dimension #n Size
    Record Set #1 Section Record #0 Offset(variable + size)
    Permutation Index #1
    .
    .
    .
    Permutation Index #n
    Record Set #1 Section Record #0 TypeThis space inserted + only to align table nicely

    Base Type

    Record Set #1 Section Record #0 Data (variable + size)
    -
    + + ... + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - + + + -
    Field NameDescription

    Dimensionality

    -

    This value is the number of dimensions that the array has. -

    -

    Dimension #n Size

    -

    This value is the size of the dimension of the array - as stored in the file. The first dimension stored in - the list of dimensions is the slowest changing dimension - and the last dimension stored is the fastest changing - dimension. -

    -

    Permutation Index #n

    -

    This value is the index permutation used to map - each dimension from the canonical representation to an - alternate axis for each dimension. Currently, dimension - permutations are not supported, and these indices should - be set to the index position minus one. In other words, - the first dimension should be set to 0, the second dimension - should be set to 1, and so on. -

    -

    Base Type

    -

    Each array type is based on some parent type. The - information for that parent type is described recursively by - this field. -

    -
    Record Set #1 Section Record #K-1 Offset(variable + size)
    -
    + + Record Set #1 Section Record #K-1 Type + This space inserted + only to align table nicely + -
    -
    - - + + + - - - - + - - - - + + + - - - - - - - - - + + + - + -
    - Array Property Description for Datatype Version 3 -
    Record Set #1 Section Record #K-1 Data (variable + size)
    ByteByteByteByte...
    DimensionalityThis space inserted only to align table nicely
    ...
    Dimension #1 Size
    .
    .
    .
    Dimension #n Size
    Number of Section Records in Set #N-1 (variable + size)

    Base Type

    Size of Free-space Section Described in Record + Set #N-1 (variable size) +
    -
    + + Record Set #N-1 Section Record #0 Offset(variable + size) + -
    -
    - - - - - - - - - - - - - - - - - - - - + + + + -
    Field NameDescription

    Dimensionality

    -

    This value is the number of dimensions that the array has. -

    -

    Dimension #n Size

    -

    This value is the size of the dimension of the array - as stored in the file. The first dimension stored in - the list of dimensions is the slowest changing dimension - and the last dimension stored is the fastest changing - dimension. -

    -

    Base Type

    -

    Each array type is based on some parent type. The - information for that parent type is described recursively by - this field. -

    -
    Record Set #N-1 Section Record #0 TypeThis space inserted + only to align table nicely
    -
    + + Record Set #N-1 Section Record #0 Data (variable + size) + + + ... + + + Record Set #N-1 Section Record #K-1 Offset(variable + size) + -
    -

    IV.A.2.e. The Data Storage - -Fill Value (Old) Message

    + + Record Set #N-1 Section Record #K-1 Type + This space inserted + only to align table nicely + - -
    - - - - - - - - -
    Header Message Name: Fill Value - (old)
    Header Message Type: 0x0004
    Length: Varies
    Status: Optional; may not be - repeated.
    Description:

    The fill value message stores a single data value which - is returned to the application when an uninitialized data element - is read from a dataset. The fill value is interpreted with the - same datatype as the dataset. If no fill value message is present - then a fill value of all zero bytes is assumed.

    -

    This fill value message is deprecated in favor of the - “new” fill value message (Message Type 0x0005) and - is only written to the file for forward compatibility with - versions of the HDF5 Library before the 1.6.0 version. - Additionally, it only appears for datasets with a user-defined - fill value (as opposed to the library default fill value or an - explicitly set “undefined” fill value).

    -
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - -
    - Fill Value Message (Old) -
    bytebytebytebyte
    Size

    Fill Value (optional, variable size)

    -
    + + Record Set #N-1 Section Record #K-1 Data (variable + size) + -
    -
    - - - - - - - - - - - - - - - -
    Field NameDescription

    Size

    -

    This is the size of the Fill Value field in bytes. -

    -

    Fill Value

    -

    The fill value. The bytes of the fill value are interpreted - using the same datatype as for the dataset. -

    -
    -
    + + Checksum + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    +
    -

    IV.A.2.f. The Data Storage - -Fill Value Message

    +
    + + + + + - -
    -
    Field NameDescription
    - - - - - - - -
    Header Message Name: Fill - Value
    Header Message Type: 0x0005
    Length: Varies
    Status: Required for dataset objects; - may not be repeated.
    Description:The fill value message stores a single data value which is - returned to the application when an uninitialized data element - is read from a dataset. The fill value is interpreted with the - same datatype as the dataset.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - -
    - Fill Value Message - Versions 1 & 2 -
    bytebytebytebyte
    VersionSpace Allocation TimeFill Value Write TimeFill Value Defined
    Size (optional)

    Fill Value (optional, variable size)

    -
    + +

    Signature

    + +

    + The ASCII character string “ + FSSE + ” is used to indicate the beginning of the Free-space Section + Information. This gives file consistency checking utilities a + better chance of reconstructing a damaged file. +

    + + -
    -
    - - - - - - - - - - + + + + - - - + + + + + - +

    + The length of this field is the minimum number of bytes needed to + store the maximum section size (from the free-space + manager header). +

    + + - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    -

    The version number information is used for changes in the - format of the fill value message and is described here: - - - - - + + + + - - - - - - - - - - - - - - - - -
    VersionDescription

    Version

    +

    This is the version number for the Free-space Section List + and this document describes version 0.

    +
    0Never used -
    1Initial version of this message. -
    2In this version, the Size and Fill Value fields are - only present if the Fill Value Defined field is set - to 1. -
    3This version packs the other fields in the message - more efficiently than version 2. -

    -

    -

    Free-space Manager Header Address

    +

    + This is the address of the Free-space Manager Header. This + field is principally used for file integrity checking. +

    +

    Space Allocation Time

    -

    When the storage space for the dataset’s raw data will be - allocated. The allowed values are: - - - - - + + + - - - - - - - - - - - - - - - -
    ValueDescription

    Number of Section Records for Set #N

    +

    + This is the number of free-space section records for set #N. The + length of this field is the minimum number of bytes needed to store + the number of serialized sections (from the free-space + manager header). +

    -
    0Not used. -
    1Early allocation. Storage space for the entire dataset - should be allocated in the file when the dataset is - created. -
    2Late allocation. Storage space for the entire dataset - should not be allocated until the dataset is written - to. -
    3Incremental allocation. Storage space for the - dataset should not be allocated until the portion - of the dataset is written to. This is currently - used in conjunction with chunked data storage for - datasets. -

    +

    + The number of sets of free-space section records is determined by + the size of serialized section list in the free-space + manager header. +

    +

    Section Size for Record Set #N

    +

    + This is the size (in bytes) of the free-space section described for + all the section records in set #N. +

    -

    Fill Value Write Time

    -

    At the time that storage space for the dataset’s raw data is - allocated, this value indicates whether the fill value should - be written to the raw data storage elements. The allowed values - are: - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0On allocation. The fill value is always written to - the raw data storage when the storage space is allocated. -
    1Never. The fill value should never be written to - the raw data storage. -
    2Fill value written if set by user. The fill value - will be written to the raw data storage when the storage - space is allocated only if the user explicitly set - the fill value. If the fill value is the library - default or is undefined, it will not be written to - the raw data storage. -

    - -

    Fill Value Defined

    -

    This value indicates if a fill value is defined for this - dataset. If this value is 0, the fill value is undefined. - If this value is 1, a fill value is defined for this dataset. - For version 2 or later of the fill value message, this value - controls the presence of the Size and Fill Value fields. -

    -

    Size

    -

    This is the size of the Fill Value field in bytes. This field - is not present if the Version field is greater than 1, - and the Fill Value Defined field is set to 0. -

    -

    Fill Value

    -

    The fill value. The bytes of the fill value are interpreted - using the same datatype as for the dataset. This field is - not present if the Version field is greater than 1, - and the Fill Value Defined field is set to 0. -

    -
    -
    + +

    Record Set #N Section #K Offset

    + +

    This is the offset (in bytes) of the free-space section + within the client for the free-space manager.

    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - -
    - Fill Value Message - Version 3 -
    bytebytebytebyte
    VersionFlagsThis space inserted only to align table nicely
    Size (optional)

    Fill Value (optional, variable size)

    -
    +

    + The length of this field is the minimum number of bytes needed to + store the size of address space (from the free-space + manager header). +

    + + -
    -
    - - - - - - - - - + +
    Field NameDescription

    Version

    -

    The version number information is used for changes in the - format of the fill value message and is described here: +

    Record Set #N Section #K Type

    +

    + This is the type of the section record, used to decode the record + set #N section #K data information. The defined record type for file + client is: + +

    - - + + - - - - - - + + - - + + - - - - -
    VersionDescriptionTypeDescription
    0Never used -
    1Initial version of this message. - 0File’s section (a range of actual bytes in file)
    2In this version, the Size and Fill Value fields are - only present if the Fill Value Defined field is set - to 1. - 1+Reserved.
    3This version packs the other fields in the message - more efficiently than version 2. -

    +
    +

    - - +

    + The defined record types for a fractal heap client are: - -

    Flags

    - -

    When the storage space for the dataset’s raw data will be - allocated. The allowed values are: +

    - - + + - - + + + - - + + + - - + + + - - + + + - - + + -
    BitsDescriptionTypeDescription
    0-1Space Allocation Time, with the same - values as versions 1 and 2 of the message. - 0Fractal heap “single” section
    2-3Fill Value Write Time, with the same - values as versions 1 and 2 of the message. - 1Fractal heap “first row” section
    4Fill Value Undefined, indicating that the fill - value has been marked as “undefined” for this dataset. - Bits 4 and 5 cannot both be set. - 2Fractal heap “normal row” section
    5Fill Value Defined, with the same values as - versions 1 and 2 of the message. - Bits 4 and 5 cannot both be set. - 3Fractal heap “indirect” section
    6-7Reserved (zero). - 4+Reserved.

    + +

    - - + + - -

    Size

    - -

    This is the size of the Fill Value field in bytes. This field - is not present if the Version field is greater than 1, - and the Fill Value Defined flag is set to 0. -

    - - + +

    Record Set #N Section #K Data

    + +

    This is the section-type specific information for each record + in the record set, described below.

    + + - -

    Fill Value

    - -

    The fill value. The bytes of the fill value are interpreted - using the same datatype as for the dataset. This field is - not present if the Version field is greater than 1, - and the Fill Value Defined flag is set to 0. -

    - - - -
    + +

    Checksum

    + +

    + This is the checksum for the Free-space Section List. +

    + + + +

    -

    IV.A.2.g. The Link Message

    +

    The section-type specific data for each free-space section record + is described below:

    - -
    - - - - - - - - -
    Header Message Name: Link
    Header Message Type: 0x0006
    Length: Varies
    Status: Optional; may be - repeated.
    Description:

    This message encodes the information for a link in a - group’s object header, when the group is storing its links - “compactly”, or in the group’s fractal heap, - when the group is storing its links “densely”.

    -

    A group is storing its links compactly when the fractal heap - address in the Link Info - Message is set to the “undefined address” - value.

    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Link Message -
    bytebytebytebyte
    VersionFlagsLink type (optional)This space inserted only to align table nicely

    Creation Order (8 bytes, optional)

    Link Name Character Set (optional)Length of Link Name (variable size)This space inserted only to align table nicely
    Link Name (variable size)

    Link Information (variable size)

    -
    +
    + + -
    -
    -
    File’s Section Data Record
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + -
    Field NameDescription

    Version

    The version number for this message. This document describes version 1.

    -

    Flags

    This field contains information about the link and controls - the presence of other fields below. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    BitsDescription
    0-1Determines the size of the Length of Link Name - field. - - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0The size of the Length of Link Name - field is 1 byte. -
    1The size of the Length of Link Name - field is 2 bytes. -
    2The size of the Length of Link Name - field is 4 bytes. -
    3The size of the Length of Link Name - field is 8 bytes. -
    -
    2Creation Order Field Present: if set, the Creation - Order field is present. If not set, creation order - information is not stored for links in this group. -
    3Link Type Field Present: if set, the link is not - a hard link and the Link Type field is present. - If not set, the link is a hard link. -
    4Link Name Character Set Field Present: if set, the - link name is not represented with the ASCII character - set and the Link Name Character Set field is - present. If not set, the link name is represented with - the ASCII character set. -
    5-7Reserved (zero). -

    - -

    Link type

    This is the link class type and can be one of the following - values: - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0A hard link (should never be stored in the file) -
    1A soft link. -
    2-63Reserved for future HDF5 internal use. -
    64An external link. -
    65-255Reserved, but available for user-defined link types. -

    - -

    This field is present if bit 3 of Flags is set.

    -

    Creation Order

    This 64-bit value is an index of the link’s creation time within - the group. Values start at 0 when the group is created an increment - by one for each link added to the group. Removing a link from a - group does not change existing links’ creation order field. -

    -

    This field is present if bit 2 of Flags is set.

    -

    Link Name Character Set

    This is the character set for encoding the link’s name: - - - - - - - - - - - - - - - -
    ValueDescription
    0ASCII character set encoding (this should never be stored - in the file) -
    1UTF-8 character set encoding -

    - -

    This field is present if bit 4 of Flags is set.

    -

    Length of link name

    This is the length of the link’s name. The size of this field - depends on bits 0 and 1 of Flags.

    -

    Link name

    This is the name of the link, non-NULL terminated.

    -

    Link information

    The format of this field depends on the link type.

    -

    For hard links, the field is formatted as follows: - - - - - - -
    Size of Offsets bytes:The address of the object header for the object that the - link points to. -
    -

    - -

    - For soft links, the field is formatted as follows: - - - - - - - - - - -
    Bytes 1-2:Length of soft link value.
    Length of soft link value bytes:A non-NULL-terminated string storing the value of the - soft link. -
    -

    - -

    - For external links, the field is formatted as follows: - - - - - - - - - - -
    Bytes 1-2:Length of external link value.
    Length of external link value bytes:The first byte contains the version number in the - upper 4 bits and flags in the lower 4 bits for the external - link. Both version and flags are defined to be zero in - this document. The remaining bytes consist of two - NULL-terminated strings, with no padding between them. - The first string is the name of the HDF5 file containing - the object linked to and the second string is the full path - to the object linked to, within the HDF5 file’s - group hierarchy. -
    -

    - -

    - For user-defined links, the field is formatted as follows: - - - - - - - - - - -
    Bytes 1-2:Length of user-defined data.
    Length of user-defined link value bytes:The data supplied for the user-defined link type.
    -

    - -
    No additional record data stored
    -
    + +
    -

    IV.A.2.h. The Data Storage - -External Data Files Message

    +
    +
    + + - -
    -
    Fractal Heap “Single” Section Data + Record
    - - - - - - - -
    Header Message Name: External - Data Files
    Header Message Type: 0x0007
    Length: Varies
    Status: Optional; may not be - repeated.
    Description:The external data storage message indicates that the data - for an object is stored outside the HDF5 file. The filename of - the object is stored as a Universal Resource Location (URL) of - the actual filename containing the data. An external file list - record also contains the byte offset of the start of the data - within the file and the amount of space reserved in the file - for that data.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - External File List Message -
    bytebytebytebyte
    VersionReserved (zero)
    Allocated SlotsUsed Slots

    Heap AddressO


    Slot Definitions...

    + + No additional record data stored + + +
    + +
    +
    +
    + + -
    Fractal Heap “First Row” Section Data + Record
    - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    + Same format as “indirect” + section data + + +
    -
    +
    +
    +
    + + -
    -
    -
    Fractal Heap “Normal Row” Section Data + Record
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    -

    The version number information is used for changes in the format of - External Data Storage Message and is described here: - - - - - - - - - - - - - -
    VersionDescription
    0Never used.
    1The current version used by the library.

    - -

    Allocated Slots

    -

    The total number of slots allocated in the message. Its value must be at least as - large as the value contained in the Used Slots field. (The current library simply - uses the number of Used Slots for this message)

    -

    Used Slots

    -

    The number of initial slots which contains valid information.

    -

    Heap Address

    -

    This is the address of a local heap which contains the names for the external - files (The local heap information can be found in Disk Format Level 1D in this - document). The name at offset zero in the heap is always the empty string.

    -

    Slot Definitions

    -

    The slot definitions are stored in order according to the array addresses they - represent.

    -
    -
    + + No additional record data stored + + + -
    -
    - - - - - - - - - - - - - - - - - - - - - -
    - External File List Slot -
    bytebytebytebyte

    Name Offset in Local HeapL


    Offset in External Data FileL


    Data Size in External FileL

    +
    +
    +
    + + -
    Fractal Heap “Indirect” Section Data + Record
    - - -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    + byte + byte + byte + byte + -
    + + Fractal Heap Indirect Block Offset (variable + size) + -
    -
    - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Name Offset in Local Heap

    -

    The byte offset within the local name heap for the name - of the file. File names are stored as a URL which has a - protocol name, a host name, a port number, and a file - name: - protocol:port//host/file. - If the protocol is omitted then “file:” is assumed. If - the port number is omitted then a default port for that - protocol is used. If both the protocol and the port - number are omitted then the colon can also be omitted. If - the double slash and host name are omitted then - “localhost” is assumed. The file name is the only - mandatory part, and if the leading slash is missing then - it is relative to the application’s current working - directory (the use of relative names is not - recommended). -

    -

    Offset in External Data File

    -

    This is the byte offset to the start of the data in the - specified file. For files that contain data for a single - dataset this will usually be zero.

    -

    Data Size in External File

    -

    This is the total number of bytes reserved in the - specified file for raw data storage. For a file that - contains exactly one complete dataset which is not - extendable, the size will usually be the exact size of the - dataset. However, by making the size larger one allows - HDF5 to extend the dataset. The size can be set to a value - larger than the entire file since HDF5 will read zeroes - past the end of the file without failing.

    -
    -
    - - -
    -

    IV.A.2.i. The Data Storage - Layout -Message

    - - -
    - - - - - - - - -
    Header Message Name: Data Storage - - Layout
    Header Message Type: 0x0008
    Length: Varies
    Status: Required for datasets; may not - be repeated.
    Description:Data layout describes how the elements of a multi-dimensional - array are stored in the HDF5 file. Three types of data layout - are supported: -
      -
    1. Contiguous: The array is stored in one contiguous area of - the file. This layout requires that the size of the array be - constant: data manipulations such as chunking, compression, - checksums, or encryption are not permitted. The message stores - the total storage size of the array. The offset of an element - from the beginning of the storage area is computed as in a C - array.
    2. -
    3. Chunked: The array domain is regularly decomposed into - chunks, and each chunk is allocated and stored separately. This - layout supports arbitrary element traversals, compression, - encryption, and checksums. (these features are described - in other messages). The message stores the size of a chunk - instead of the size of the entire array; the storage size of - the entire array can be calculated by traversing the B-tree - that stores the chunk addresses.
    4. -
    5. Compact: The array is stored in one contiguous block, as - part of this object header message.
    6. -
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Data Layout Message (Versions 1 and 2) -
    bytebytebytebyte
    VersionDimensionalityLayout ClassReserved (zero)
    Reserved (zero)

    Data AddressO (optional)

    Dimension 0 Size
    Dimension 1 Size
    ...
    Dimension #n Size
    Dataset Element Size (optional)
    Compact Data Size (optional)

    Compact Data... (variable size, optional)

    + + Block Start Row + Block Start Column + - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    + Number of Blocks + This space inserted + only to align table nicely + + +
    -
    +
    +
    + + + + + -
    -
    -
    Field NameDescription
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + - - - - -
    Field NameDescription

    Version

    -

    The version number information is used for changes in the format of the data - layout message and is described here: - - - - - - - - - - - - - - - - - - - - -
    VersionDescription
    0Never used.
    1Used by version 1.4 and before of the library to encode layout information. - Data space is always allocated when the data set is created.
    2Used by version 1.6.x of the library to encode layout information. - Data space is allocated only when it is necessary.

    -

    Dimensionality

    An array has a fixed dimensionality. This field - specifies the number of dimension size fields later in the - message. The value stored for chunked storage is 1 greater than - the number of dimensions in the dataset’s dataspace. - For example, 2 is stored for a 1 dimensional dataset. -

    -

    Layout Class

    The layout class specifies the type of storage for the data - and how the other fields of the layout message are to be - interpreted. - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0Compact Storage -
    1Contiguous Storage -
    2Chunked Storage -
    -

    -

    Data Address

    For contiguous storage, this is the address of the raw - data in the file. For chunked storage this is the address - of the v1 B-tree that is used to look up the addresses of the - chunks. This field is not present for compact storage. - If the version for this message is greater than 1, the address - may have the “undefined address” value, to indicate that - storage has not yet been allocated for this array.

    -

    Dimension #n Size

    For contiguous and compact storage the dimensions define - the entire size of the array while for chunked storage they define - the size of a single chunk. In all cases, they are in units of - array elements (not bytes). The first dimension stored in the list - of dimensions is the slowest changing dimension and the last - dimension stored is the fastest changing dimension. -

    -

    Dataset Element Size

    The size of a dataset element, in bytes. This field is only - present for chunked storage. -

    -

    Compact Data Size

    This field is only present for compact data storage. - It contains the size of the raw data for the dataset array, in - bytes.

    -

    Fractal Heap Block Offset

    +

    The offset of the indirect block in the fractal heap’s + address space containing the empty blocks.

    +

    + The number of bytes used to encode this field is the minimum number + of bytes needed to encode values for the Maximum Heap Size + (in the fractal heap’s header). +

    +

    Compact Data

    This field is only present for compact data storage. - It contains the raw data for the dataset array.

    -
    -
    + +

    Block Start Row

    + +

    This is the row that the empty blocks start in.

    + + -
    -

    Version 3 of this message re-structured the format into specific - properties that are required for each layout class.

    - - -
    - - - - - - - - - - - - - - - - - - - -
    - Data Layout Message (Version 3) -
    bytebytebytebyte
    VersionLayout ClassThis space inserted only to align table nicely

    Properties (variable size)

    -
    + +

    Block Start Column

    + +

    This is the column that the empty blocks start in.

    + + -
    -
    - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    -

    The version number information is used for changes in the format of layout message - and is described here: - - - - - - - - - - -
    VersionDescription
    3Used by the version 1.6.3 and later of the library to store properties - for each layout class.

    -

    Layout Class

    The layout class specifies the type of storage for the data - and how the other fields of the layout message are to be - interpreted. - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    0Compact Storage -
    1Contiguous Storage -
    2Chunked Storage -
    -

    -

    Properties

    This variable-sized field encodes information specific to each - layout class and is described below. If there is no property - information specified for a layout class, the size of this field - is zero bytes.

    -
    + +

    Number of Blocks

    + +

    This is the number of empty blocks covered by the section.

    + + + + -
    -

    Class-specific information for compact layout (Class 0): (Note: The dimensionality information - is in the Dataspace message)

    - - -
    - - - - - - - - - - - - - - - - - - -
    - Compact Storage Property Description -
    bytebytebytebyte
    SizeThis space inserted only to align table nicely

    Raw Data... (variable size)

    -
    +
    +

    + III.H. Disk Format: Level 1H - Shared Object + Header Message Table +

    + +

    + The shared object header message table is used to locate + object header messages that are shared between two or more object + headers in the file. Shared object header messages are stored and + indexed in the file in one of two ways: indexed sequentially in a shared + header message list or indexed with a v2 B-tree. The shared messages + themselves are either stored in a fractal heap (when two or more + objects share the message), or remain in an object’s header (when + only one object uses the message currently, but the message can be + shared in the future). +

    -
    -
    - - - - - - - - - - - - - - - -
    Field NameDescription

    Size

    This field contains the size of the raw data for the dataset - array, in bytes. -

    -

    Raw Data

    This field contains the raw data for the dataset array.

    -
    +

    + The shared object header message table contains a list of + shared message index headers. Each index header records information + about the version of the index format, the index storage type, flags + for the message types indexed, the number of messages in the index, the + address where the index resides, and the fractal heap address if shared + messages are stored there. +

    + +

    + Each index can be either a list or a v2 B-tree and may transition + between those two forms as the number of messages in the index varies. + Each shared message record contains information used to locate the + shared message from either a fractal heap or an object header. The + types of messages that can be shared are: Dataspace, Datatype, + Fill Value, Filter Pipeline and Attribute. +

    +

    + The shared object header message table is pointed to from a shared message table message in the + superblock extension for a file. This message stores the version of the + table format, along with the number of index headers in the table. +

    -
    -

    Class-specific information for contiguous layout (Class 1): (Note: The dimensionality information - is in the Dataspace message)

    - - -
    - - - - - - - - - - - - - - - - - -
    - Contiguous Storage Property Description -
    bytebytebytebyte

    AddressO


    SizeL

    +
    + + -
    Shared Object Header Message Table
    - - + + + + + + - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    bytebytebytebyte
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    + Signature + -
    + + Version for index #0 + Index Type for index #0 + Message Type Flags for index #0 + -
    -
    - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Address

    This is the address of the raw data in the file. - The address may have the “undefined address” value, to indicate - that storage has not yet been allocated for this array.

    Size

    This field contains the size allocated to store the raw data, - in bytes. -

    -
    Minimum Message Size for index #0
    List Cutoff for index #0v2 B-tree Cutoff for index #0
    Number of Messages for index #0This space inserted + only to align table nicely

    Index AddressO for index #0
    +

    Fractal Heap AddressO for + index #0
    +
    ...
    ...
    Version for index #N-1Index Type for index #N-1Message Type Flags for index #N-1
    Minimum Message Size for index #N-1
    List Cutoff for index #N-1v2 B-tree Cutoff for index #N-1
    Number of Messages for index #N-1This space inserted + only to align table nicely

    Index AddressO for index #N-1
    +

    Fractal Heap AddressO for + index #N-1
    +
    Checksum
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Signature

    +

    + The ASCII character string “ + SMTB + ” is used to indicate the beginning of the Shared Object + Header Message table. This gives file consistency checking + utilities a better chance of reconstructing a damaged file. +

    +

    Version for index #N

    +

    This is the version number for the list of shared object + header message indexes and this document describes version 0.

    +

    Index Type for index #N

    +

    The type of index can be an unsorted list or a v2 B-tree.

    +

    Message Type Flags for index #N

    +

    This field indicates the type of messages tracked in the + index, as follows:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    BitsDescription
    0If set, the index tracks Dataspace Messages. +
    1If set, the index tracks Datatype Messages. +
    2If set, the index tracks Fill Value Messages. +
    3If set, the index tracks Filter Pipeline + Messages. +
    4If set, the index tracks Attribute Messages. +
    5-15Reserved (zero).
    +

    + + +

    An index can track more than one type of message, but each + type of message can only be in one index.

    +

    Minimum Message Size for index #N

    +

    This is the message size sharing threshold for the index. If + the encoded size of the message is less than this value, the + message is not shared.

    +

    List Cutoff for index #N

    +

    This is the cutoff value for the indexing of messages to + switch from a list to a v2 B-tree. If the number of messages is + greater than this value, the index should be a v2 B-tree.

    +

    v2 B-tree Cutoff for index #N

    +

    This is the cutoff value for the indexing of messages to + switch from a v2 B-tree back to a list. If the number of messages + is less than this value, the index should be a list.

    +

    Number of Messages for index #N

    +

    The number of shared messages being tracked for the index.

    +

    Index Address for index #N

    +

    This field is the address of the list or v2 B-tree where the + index nodes reside.

    +

    Fractal Heap Address for index #N

    +

    This field is the address of the fractal heap if shared + messages are stored there.

    +

    Checksum

    +

    This is the checksum for the table.

    +
    +
    + +
    +

    + Shared messages are indexed either with a shared message + record list, described below, or using a v2 B-tree (using record type + 7). The number of records in the shared message record list is + determined in the index’s entry in the shared object + header message table. +

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Shared Message Record List
    bytebytebytebyte
    Signature
    Shared Message Record #0
    Shared Message Record #1
    ...
    Shared Message Record #N-1
    Checksum
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Signature

    +

    + The ASCII character string “ + SMLI + ” is used to indicate the beginning of a list of index nodes. + This gives file consistency checking utilities a better chance of + reconstructing a damaged file. +

    +

    Shared Message Record #N

    +

    + The record for locating the shared message, either in the fractal + heap for the index, or an object header (see format for index + nodes below). +

    +

    Checksum

    +

    This is the checksum for the list.

    +
    +
    + +
    +

    The record for each shared message in an index is stored in one + of the following forms:

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Shared Message Record, for messages stored in a + fractal heap
    bytebytebytebyte
    Message LocationThis space inserted + only to align table nicely
    Hash Value
    Reference Count

    Fractal Heap ID
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Message Location

    +

    This has a value of 0 indicating that the message is stored + in the heap.

    +

    Hash Value

    +

    This is the hash value for the message.

    +

    Reference Count

    +

    This is the number of times the message is used in the file. +

    +

    Fractal Heap ID

    +

    This is an 8-byte fractal heap ID for the message as stored + in the fractal heap for the index.

    +
    +
    + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Shared Message Record, for messages stored in an + object header
    bytebytebytebyte
    Message LocationThis space inserted + only to align table nicely
    Hash Value
    ReservedMessage TypeCreation Index

    Object Header AddressO
    +
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Message Location

    +

    This has a value of 1 indicating that the message is stored + in an object header.

    +

    Hash Value

    +

    This is the hash value for the message.

    +

    Message Type

    +

    This is the message type in the object header.

    +

    Creation Index

    +

    This is the creation index of the message within the object + header.

    +

    Object Header Address

    +

    This is the address of the object header where the message is + located.

    +
    +
    + + + +
    +
    +
    +

    + IV. Disk Format: Level 2 - Data Objects +

    + +

    Data objects contain the “real” user-visible + information in the file. These objects compose the scientific data and + other information which are generally thought of as “data” + by the end-user. All the other information in the file is provided as a + framework for storing and accessing these data objects.

    + +

    A data object is composed of header and data information. The + header information contains the information needed to interpret the + data information for the object as well as additional + “metadata” or pointers to additional “metadata” + used to describe or annotate each object.

    + +
    +

    + IV.A. Disk Format: Level 2A - Data Object + Headers +

    + +

    The header information of an object is designed to encompass all + of the information about an object, except for the data itself. This + information includes the dataspace, the datatype, information about how + the data is stored on disk (in external files, compressed, broken up in + blocks, and so on), as well as other information used by the library to + speed up access to the data objects or maintain a file’s + integrity. Information stored by user applications as attributes is + also stored in the object’s header. The header of each object is + not necessarily located immediately prior to the object’s data in + the file and in fact may be located in any position in the file. The + order of the messages in an object header is not significant.

    + +

    Object headers are composed of a prefix and a set of messages. + The prefix contains the information needed to interpret the messages + and a small amount of metadata about the object, and the messages + contain the majority of the metadata about the object.

    + +
    +

    + IV.A.1. Disk Format: Level 2A1 - Data + Object Header Prefix +

    + +
    +

    + IV.A.1.a. Version 1 Data Object + Header Prefix +

    + +

    Header messages are aligned on 8-byte boundaries for version 1 + object headers.

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Version 1 Object Header
    bytebytebytebyte
    VersionReserved (zero)Total Number of Header Messages
    Object Reference Count
    Object Header Size
    Header Message Type #1Size of Header Message Data #1
    Header Message #1 FlagsReserved (zero)

    Header Message Data #1
    +
    .
    .
    .
    Header Message Type #nSize of Header Message Data #n
    Header Message #n FlagsReserved (zero)

    Header Message Data #n
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    This value is used to determine the format of the information + in the object header. When the format of the object header is + changed, the version number is incremented and can be used to + determine how the information in the object header is formatted. + This is version one (1) (there was no version zero (0)) of the + object header.

    +

    Total Number of Header Messages

    +

    This value determines the total number of messages listed in + object headers for this object. This value includes the messages in + continuation messages for this object.

    +

    Object Reference Count

    +

    This value specifies the number of “hard links” + to this object within the current file. References to the object + from external files, “soft links” in this file and + object references in this file are not tracked.

    +

    Object Header Size

    +

    This value specifies the number of bytes of header message + data following this length field that contain object header + messages for this object header. This value does not include the + size of object header continuation blocks for this object elsewhere + in the file.

    +

    Header Message #n Type

    +

    This value specifies the type of information included in the + following header message data. The message types for header + messages are defined in sections below.

    +

    Size of Header Message #n Data

    +

    This value specifies the number of bytes of header message + data following the header message type and length information for + the current message. The size includes padding bytes to make the + message a multiple of eight bytes.

    +

    Header Message #n Flags

    +

    This is a bit field with the following definition:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    BitDescription
    0If set, the message data is constant. This is used for + messages like the datatype message of a dataset.
    1If set, the message is shared and stored in + a location other than the object header. The Header Message Data + field contains a Shared Message (described in the Data Object Header Messages + section below) and the Size of Header Message Data field contains + the size of that Shared Message. +
    2If set, the message should not be shared.
    3If set, the HDF5 decoder should fail to open this object + if it does not understand the message’s type and the file + is open with permissions allowing write access to the file. + (Normally, unknown messages can just be ignored by HDF5 decoders) +
    4If set, the HDF5 decoder should set bit 5 of this + message’s flags (in other words, this bit field) if it does + not understand the message’s type and the object is + modified in any way. (Normally, unknown messages can just be + ignored by HDF5 decoders)
    5If set, this object was modified by software that did not + understand this message. (Normally, unknown messages should just + be ignored by HDF5 decoders) (Can be used to invalidate an index + or a similar feature)
    6If set, this message is shareable.
    7If set, the HDF5 decoder should always fail to open this + object if it does not understand the message’s type + (whether it is open for read-only or read-write access). + (Normally, unknown messages can just be ignored by HDF5 decoders) +
    +

    + +

    Header Message #n Data

    +

    The format and length of this field is determined by the + header message type and size respectively. Some header message + types do not require any data and this information can be + eliminated by setting the length of the message to zero. The data + is padded with enough zeroes to make the size a multiple of eight. +

    +
    +
    + +
    +

    + IV.A.1.b. Version 2 Data Object + Header Prefix +

    + +

    Note that the “total number of messages” field has + been dropped from the data object header prefix in this version. The + number of messages in the data object header is just determined by the + messages encountered in all the object header blocks.

    + +

    + Note also that the fields and messages in this version of data object + headers have no alignment or padding bytes inserted - they are + stored packed together. +

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Version 2 Object Header
    bytebytebytebyte
    Signature
    VersionFlagsThis space inserted + only to align table nicely
    Access time (optional)
    Modification Time (optional)
    Change Time (optional)
    Birth Time (optional)
    Maximum # of compact attributes (optional)Minimum # of dense attributes (optional)
    Size of Chunk #0 (variable size)This space inserted + only to align table nicely
    Header Message Type #1Size of Header Message Data #1Header Message #1 Flags
    Header Message #1 Creation Order (optional)This space inserted + only to align table nicely

    Header Message Data #1
    +
    .
    .
    .
    Header Message Type #nSize of Header Message Data #nHeader Message #n Flags
    Header Message #n Creation Order (optional)This space inserted + only to align table nicely

    Header Message Data #n
    +
    Gap (optional, variable size)
    Checksum
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Signature

    +

    + The ASCII character string “ + OHDR + ” is used to indicate the beginning of an object header. This + gives file consistency checking utilities a better chance of + reconstructing a damaged file. +

    +

    Version

    +

    This field has a value of 2 indicating version 2 of the + object header.

    +

    Flags

    +

    This field is a bit field indicating additional information + about the object header.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Bit(s)Description
    0-1This two-bit field determines the size of the Size + of Chunk #0 field. The values are: + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0The Size of Chunk #0 field is 1 byte. +
    1The Size of Chunk #0 field is 2 bytes. +
    2The Size of Chunk #0 field is 4 bytes. +
    3The Size of Chunk #0 field is 8 bytes. +
    +

    +
    2If set, attribute creation order is tracked.
    3If set, attribute creation order is indexed.
    4If set, non-default attribute storage phase change values + are stored.
    5If set, access, modification, change and birth times are + stored.
    6-7Reserved
    +

    + +

    Access Time

    +

    This 32-bit value represents the number of seconds after the + UNIX epoch when the object’s raw data was last accessed (in + other words, read or written).

    +

    + This field is present if bit 5 of flags is set. +

    +

    Modification Time

    +

    This 32-bit value represents the number of seconds after the + UNIX epoch when the object’s raw data was last modified (in + other words, written).

    +

    + This field is present if bit 5 of flags is set. +

    +

    Change Time

    +

    This 32-bit value represents the number of seconds after the + UNIX epoch when the object’s metadata was last changed.

    +

    + This field is present if bit 5 of flags is set. +

    +

    Birth Time

    +

    This 32-bit value represents the number of seconds after the + UNIX epoch when the object was created.

    +

    + This field is present if bit 5 of flags is set. +

    +

    Maximum # of compact attributes

    +

    This is the maximum number of attributes to store in the + compact format before switching to the indexed format.

    +

    + This field is present if bit 4 of flags is set. +

    +

    Minimum # of dense attributes

    +

    This is the minimum number of attributes to store in the + indexed format before switching to the compact format.

    +

    + This field is present if bit 4 of flags is set. +

    +

    Size of Chunk #0

    +

    This unsigned value specifies the number of bytes of header + message data following this field that contain object header + information.

    +

    This value does not include the size of object header + continuation blocks for this object elsewhere in the file.

    +

    + The length of this field varies depending on bits 0 and 1 of the flags + field. +

    +

    Header Message #n Type

    +

    Same format as version 1 of the object header, described + above.

    +

    Size of Header Message #n Data

    +

    + This value specifies the number of bytes of header message data + following the header message type and length information for the + current message. The size of messages in this version does not + include any padding bytes. +

    +

    Header Message #n Flags

    +

    Same format as version 1 of the object header, described + above.

    +

    Header Message #n Creation Order

    +

    This field stores the order that a message of a given type + was created in.

    +

    + This field is present if bit 2 of flags is set. +

    +

    Header Message #n Data

    +

    Same format as version 1 of the object header, described + above.

    +

    Gap

    +

    A gap in an object header chunk is inferred by the end of the + messages for the chunk before the beginning of the chunk’s + checksum. Gaps are always smaller than the size of an object header + message prefix (message type + message size + message flags).

    +

    Gaps are formed when a message (typically an attribute + message) in an earlier chunk is deleted and a message from a later + chunk that does not quite fit into the free space is moved into the + earlier chunk.

    +

    Checksum

    +

    This is the checksum for the object header chunk.

    +
    +
    + +

    The header message types and the message data associated with + them compose the critical “metadata” about each object. + Some header messages are required for each object while others are + optional. Some optional header messages may also be repeated several + times in the header itself, the requirements and number of times + allowed in the header will be noted in each header message description + below.

    + + +
    +

    + IV.A.2. Disk Format: Level 2A2 - + Data Object Header Messages +

    + +

    Data object header messages are small pieces of metadata that are + stored in the data object header for each object in an HDF5 file. Data + object header messages provide the metadata required to describe an + object and its contents, as well as optional pieces of metadata that + annotate the meaning or purpose of the object.

    + +

    + Data object header messages are either stored directly in the data + object header for the object or are shared between multiple objects in + the file. When a message is shared, a flag in the Message + Flags indicates that the actual Message Data portion of that + message is stored in another location (such as another data object + header, or a heap in the file) and the Message Data field + contains the information needed to locate the actual information for + the message. +

    + +

    The format of shared message data is described here:

    + +
    + + + + + + + + + + + + + + + + + + + + + + + +
    Shared Message (Version 1)
    bytebytebytebyte
    VersionTypeReserved (zero)
    Reserved (zero)

    AddressO
    +
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number is used when there are changes in + the format of a shared object message and is described here:

    + + + + + + + + + + + + + + + +
    VersionDescription
    0Never used.
    1Used by the library before version 1.6.1.
    +

    Type

    The type of shared message location:

    + + + + + + + + + + +
    ValueDescription
    0Message stored in another object’s header (a committed + message). +
    +

    Address

    The address of the object header containing the + message to be shared.

    +
    + +
    +
    +
    + + + + + + + + + + + + + + + + + + + +
    Shared Message (Version 2)
    bytebytebytebyte
    VersionTypeThis space inserted + only to align table nicely

    AddressO
    +
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number is used when there are changes in + the format of a shared object message and is described here:

    + + + + + + + + + + +
    VersionDescription
    2Used by the library of version 1.6.1 and after.
    +

    Type

    The type of shared message location:

    + + + + + + + + + + +
    ValueDescription
    0Message stored in another object’s header (a committed + message). +
    +

    Address

    The address of the object header containing the + message to be shared.

    +
    + +
    +
    +
    + + + + + + + + + + + + + + + + + + + +
    Shared Message (Version 3)
    bytebytebytebyte
    VersionTypeThis space inserted + only to align table nicely
    Location (variable size)
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number indicates changes in the format of + shared object message and is described here:

    + + + + + + + + + + +
    VersionDescription
    3Used by the library of version 1.8 and after. In this + version, the Type field can indicate that the message is + stored in the fractal heap. +
    +

    Type

    The type of shared message location:

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Message is not shared and is not shareable.
    1Message stored in file’s shared object + header message heap (a shared message). +
    2Message stored in another object’s header (a committed + message). +
    3Message is not shared, but is shareable.
    +

    Location

    + This field contains either a Size of Offsets-bytes address + of the object header containing the message to be shared, or an + 8-byte fractal heap ID for the message in the file’s shared + object header message heap. +

    +
    + + +

    The following is a list of currently defined header messages:

    + +
    +

    + IV.A.2.a. The NIL Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: NIL
    Header Message Type: 0x0000
    Length: Varies
    Status: Optional; may be repeated.
    Description:The NIL message is used to indicate a message which is to be + ignored when reading the header messages for a data object. + [Possibly one which has been deleted for some reason.]
    Format of Data: Unspecified
    +
    + + + +
    +

    + IV.A.2.b. The Dataspace Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Dataspace
    Header Message Type: 0x0001
    Length: Varies according to the number of + dimensions, as described in the following table.
    Status: Required for dataset objects; may + not be repeated.
    Description:The dataspace message describes the number of dimensions (in + other words, “rank”) and size of each dimension that the + data object has. This message is only used for datasets which have a + simple, rectilinear, array-like layout; datasets requiring a more + complex layout are not yet supported.
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Dataspace Message - Version 1
    bytebytebytebyte
    VersionDimensionalityFlagsReserved
    Reserved

    Dimension #1 SizeL
    +
    .
    .
    .

    Dimension #n SizeL
    +

    Dimension #1 Maximum SizeL (optional)
    +
    .
    .
    .

    Dimension #n Maximum SizeL (optional)
    +

    Permutation Index #1L (optional)
    +
    .
    .
    .

    Permutation Index #nL (optional)
    +
    + + + + + + +
     (Items marked with an ‘L’ in the + above table are of the size specified in “Size of + Lengths” field in the superblock.)
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    This value is used to determine the format of the Dataspace + Message. When the format of the information in the message is + changed, the version number is incremented and can be used to + determine how the information in the object header is formatted. + This document describes version one (1) (there was no version zero + (0)).

    +

    Dimensionality

    +

    This value is the number of dimensions that the data object + has.

    +

    Flags

    +

    This field is used to store flags to indicate the presence of + parts of this message. Bit 0 (the least significant bit) is used to + indicate that maximum dimensions are present. Bit 1 is used to + indicate that permutation indices are present.

    +

    Dimension #n Size

    +

    This value is the current size of the dimension of the data + as stored in the file. The first dimension stored in the list of + dimensions is the slowest changing dimension and the last dimension + stored is the fastest changing dimension.

    +

    Dimension #n Maximum Size

    +

    + This value is the maximum size of the dimension of the data as + stored in the file. This value may be the special “unlimited” size which indicates + that the data may expand along this dimension indefinitely. If + these values are not stored, the maximum size of each dimension is + assumed to be the dimension’s current size. +

    +

    Permutation Index #n

    +

    This value is the index permutation used to map each + dimension from the canonical representation to an alternate axis + for each dimension. If these values are not stored, the first + dimension stored in the list of dimensions is the slowest changing + dimension and the last dimension stored is the fastest changing + dimension.

    +
    +
    + + + +
    +

    Version 2 of the dataspace message dropped the optional + permutation index value support, as it was never implemented in the + HDF5 Library:

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Dataspace Message - Version 2
    bytebytebytebyte
    VersionDimensionalityFlagsType

    Dimension #1 SizeL
    +
    .
    .
    .

    Dimension #n SizeL
    +

    Dimension #1 Maximum SizeL (optional)
    +
    .
    .
    .

    Dimension #n Maximum SizeL (optional)
    +
    + + + + + + +
     (Items marked with an ‘L’ in the + above table are of the size specified in “Size of + Lengths” field in the superblock.)
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    This value is used to determine the format of the Dataspace + Message. This field should be ‘2’ for version 2 format + messages.

    +

    Dimensionality

    +

    This value is the number of dimensions that the data object + has.

    +

    Flags

    +

    This field is used to store flags to indicate the presence of + parts of this message. Bit 0 (the least significant bit) is used to + indicate that maximum dimensions are present.

    +

    Type

    +

    This field indicates the type of the dataspace:

    + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0A scalar dataspace; in other words, a dataspace + with a single, dimensionless element. +
    1A simple dataspace; in other words, a dataspace + with a rank > 0 and an appropriate # of dimensions. +
    2A null dataspace; in other words, a dataspace + with no elements. +
    +

    +

    Dimension #n Size

    +

    This value is the current size of the dimension of the data + as stored in the file. The first dimension stored in the list of + dimensions is the slowest changing dimension and the last dimension + stored is the fastest changing dimension.

    +

    Dimension #n Maximum Size

    +

    + This value is the maximum size of the dimension of the data as + stored in the file. This value may be the special “unlimited” size which indicates + that the data may expand along this dimension indefinitely. If + these values are not stored, the maximum size of each dimension is + assumed to be the dimension’s current size. +

    +
    +
    + + + + + +
    +

    + IV.A.2.c. The Link Info Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Link Info
    Header Message Type: 0x0002
    Length: Varies
    Status: Optional; may not be repeated.
    Description:The link info message tracks variable information about the + current state of the links for a “new style” + group’s behavior. Variable information will be stored in this + message and constant information will be stored in the Group Info message. +
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Link Info
    bytebytebytebyte
    VersionFlagsThis space inserted + only to align table nicely

    Maximum Creation Index (8 bytes, + optional)
    +

    Fractal Heap AddressO
    +

    Address of v2 B-tree for Name IndexO
    +

    Address of v2 B-tree for Creation Order + IndexO (optional)
    +
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    The version number for this message. This document describes + version 0.

    +

    Flags

    This field determines various optional aspects of the + link info message:

    + + + + + + + + + + + + + + + + + + +
    BitDescription
    0If set, creation order for the links is tracked.
    1If set, creation order for the links is indexed.
    2-7Reserved
    +

    Maximum Creation Index

    This 64-bit value is the maximum creation order index + value stored for a link in this group.

    +

    + This field is present if bit 0 of flags is set. +

    Fractal Heap Address

    +

    + This is the address of the fractal heap to store dense links. Each + link stored in the fractal heap is stored as a Link Message. +

    +

    + If there are no links in the group, or the group’s links are + stored “compactly” (as object header messages), this + value will be the undefined address. +

    +

    Address of v2 B-tree for Name Index

    This is the address of the version 2 B-tree to index + names of links.

    +

    + If there are no links in the group, or the group’s links are + stored “compactly” (as object header messages), this + value will be the undefined address. +

    Address of v2 B-tree for Creation Order Index

    This is the address of the version 2 B-tree to index + creation order of links.

    +

    + If there are no links in the group, or the group’s links are + stored “compactly” (as object header messages), this + value will be the undefined address. +

    +

    + This field exists if bit 1 of flags is set. +

    +
    + + +
    +

    + IV.A.2.d. The Datatype Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Datatype
    Header Message Type: 0x0003
    Length: Variable
    Status: Required for dataset or committed + datatype (formerly named datatype) objects; may not be repeated.
    Description:

    The datatype message defines the datatype for each + element of a dataset or a common datatype for sharing between + multiple datasets. A datatype can describe an atomic type like a + fixed- or floating-point type or more complex types like a C struct + (compound datatype), array (array datatype) or C++ vector + (variable-length datatype).

    +

    Datatype messages that are part of a dataset object do not + describe how elements are related to one another; the dataspace + message is used for that purpose. Datatype messages that are part + of a committed datatype (formerly named datatype) message describe + a common datatype that can be shared by multiple datasets in the + file.

    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    Datatype Message
    bytebytebytebyte
    Class and VersionClass Bit Field, Bits 0-7Class Bit Field, Bits 8-15Class Bit Field, Bits 16-23
    Size

    +
    Properties
    +
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Class and Version

    +

    The version of the datatype message and the datatype’s + class information are packed together in this field. The version + number is packed in the top 4 bits of the field and the class is + contained in the bottom 4 bits.

    +

    The version number information is used for changes in the + format of the datatype message and is described here:

    + + + + + + + + + + + + + + + + + + + + + + +
    VersionDescription
    0Never used
    1Used by early versions of the library to encode compound + datatypes with explicit array fields. See the compound datatype + description below for further details.
    2Used when an array datatype needs to be encoded.
    3Used when a VAX byte-ordered type needs to be encoded. + Packs various other datatype classes more efficiently also.
    +

    + +

    The class of the datatype determines the format for the class + bit field and properties portion of the datatype message, which are + described below. The following classes are currently defined:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Fixed-Point
    1Floating-Point
    2Time
    3String
    4Bit field
    5Opaque
    6Compound
    7Reference
    8Enumerated
    9Variable-Length
    10Array
    +

    + +

    Class Bit Fields

    +

    The information in these bit fields is specific to each + datatype class and is described below. All bits not defined for a + datatype class are set to zero.

    +

    Size

    +

    The size of a datatype element in bytes.

    +

    Properties

    +

    This variable-sized sequence of bytes encodes information + specific to each datatype class and is described for each class + below. If there is no property information specified for a datatype + class, the size of this field is zero bytes.

    +
    +
    + + +
    +

    Class specific information for Fixed-Point Numbers (Class 0):

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Fixed-point Bit Field Description
    BitsMeaning

    0

    + Byte Order. If zero, byte order is little-endian; otherwise, + byte order is big endian. +

    1, 2

    + Padding type. Bit 1 is the lo_pad bit and bit 2 is the + hi_pad bit. If a datum has unused bits at either end, then the + lo_pad or hi_pad bit is copied to those locations. +

    3

    + Signed. If this bit is set then the fixed-point number is in + 2’s complement form. +

    4-23

    Reserved (zero).

    +
    + +
    +
    + + + + + + + + + + + + + + +
    Fixed-Point Property Description
    ByteByteByteByte
    Bit OffsetBit Precision
    +
    + +
    +
    + + + + + + + + + + + + + + + + +
    Field NameDescription

    Bit Offset

    +

    The bit offset of the first significant bit of the + fixed-point value within the datatype. The bit offset specifies the + number of bits “to the right of” the value (which are + set to the lo_pad bit value).

    +

    Bit Precision

    +

    The number of bits of precision of the fixed-point value + within the datatype. This value, combined with the datatype + element’s size and the Bit Offset field specifies the number + of bits “to the left of” the value (which are set to + the hi_pad bit value).

    +
    +
    + + +
    +

    Class specific information for Floating-Point Numbers (Class 1):

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Floating-Point Bit Field Description
    BitsMeaning

    0, 6

    + Byte Order. These two non-contiguous bits specify the + “endianness” of the bytes in the datatype element. +

    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Bit 6Bit 0Description
    00Byte order is little-endian
    01Byte order is big-endian
    10Reserved
    11Byte order is VAX-endian
    +

    1, 2, 3

    + Padding type. Bit 1 is the low bits pad type, bit 2 is the + high bits pad type, and bit 3 is the internal bits pad type. If a + datum has unused bits at either end or between the sign bit, + exponent, or mantissa, then the value of bit 1, 2, or 3 is copied + to those locations. +

    4-5

    + Mantissa Normalization. This 2-bit bit field specifies how + the most significant bit of the mantissa is managed. +

    + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0No normalization
    1The most significant bit of the mantissa is always set + (except for 0.0).
    2The most significant bit of the mantissa is not stored, + but is implied to be set.
    3Reserved.
    +

    7

    Reserved (zero).

    8-15

    + Sign Location. This is the bit position of the sign bit. + Bits are numbered with the least significant bit zero. +

    16-23

    Reserved (zero).

    +
    + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + +
    Floating-Point Property Description
    ByteByteByteByte
    Bit OffsetBit Precision
    Exponent LocationExponent SizeMantissa LocationMantissa Size
    Exponent Bias
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Bit Offset

    +

    The bit offset of the first significant bit of the + floating-point value within the datatype. The bit offset specifies + the number of bits “to the right of” the value.

    +

    Bit Precision

    +

    The number of bits of precision of the floating-point value + within the datatype.

    +

    Exponent Location

    +

    The bit position of the exponent field. Bits are numbered + with the least significant bit number zero.

    +

    Exponent Size

    +

    The size of the exponent field in bits.

    +

    Mantissa Location

    +

    The bit position of the mantissa field. Bits are numbered + with the least significant bit number zero.

    +

    Mantissa Size

    +

    The size of the mantissa field in bits.

    +

    Exponent Bias

    +

    The bias of the exponent field.

    +
    +
    + + +
    +

    Class specific information for Time (Class 2):

    + + +
    + + + + + + + + + + + + + + + + + +
    Time Bit Field Description
    BitsMeaning

    0

    + Byte Order. If zero, byte order is little-endian; otherwise, + byte order is big endian. +

    1-23

    Reserved (zero).

    +
    + +
    +
    + + + + + + + + + + + +
    Time Property Description
    ByteByte
    Bit Precision
    +
    + +
    +
    + + + + + + + + + + + +
    Field NameDescription

    Bit Precision

    +

    The number of bits of precision of the time value.

    +
    +
    + + +
    +

    Class specific information for Strings (Class 3):

    + + +
    + + + + + + + + + + + + + + + + + + + + + + +
    String Bit Field Description
    BitsMeaning

    0-3

    + Padding type. This four-bit value determines the type of + padding to use for the string. The values are: + +

    + + + + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Null Terminate: A zero byte marks the end of the string + and is guaranteed to be present after converting a long string to + a short string. When converting a short string to a long string + the value is padded with additional null characters as necessary. +
    1Null Pad: Null characters are added to the end of the + value during conversions from short values to long values but + conversion in the opposite direction simply truncates the value. +
    2Space Pad: Space characters are added to the end of the + value during conversions from short values to long values but + conversion in the opposite direction simply truncates the value. + This is the Fortran representation of the string.
    3-15Reserved
    +

    4-7

    + Character Set. The character set used to encode the string. +

    + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0ASCII character set encoding
    1UTF-8 character set encoding
    2-15Reserved
    +

    8-23

    Reserved (zero).

    +
    + +

    There are no properties defined for the string class.

    + + +

    Class specific information for bit fields (Class 4):

    + +
    + + + + + + + + + + + + + + + + + + + + + + +
    Bitfield Bit Field Description
    BitsMeaning

    0

    + Byte Order. If zero, byte order is little-endian; otherwise, + byte order is big endian. +

    1, 2

    + Padding type. Bit 1 is the lo_pad type and bit 2 is the + hi_pad type. If a datum has unused bits at either end, then the + lo_pad or hi_pad bit is copied to those locations. +

    3-23

    Reserved (zero).

    +
    + +
    +
    + + + + + + + + + + + + + + +
    Bit Field Property Description
    ByteByteByteByte
    Bit OffsetBit Precision
    +
    + +
    +
    + + + + + + + + + + + + + + + +
    Field NameDescription

    Bit Offset

    +

    The bit offset of the first significant bit of the bit field + within the datatype. The bit offset specifies the number of bits + “to the right of” the value.

    +

    Bit Precision

    +

    The number of bits of precision of the bit field within the + datatype.

    +
    +
    + + +
    +

    Class specific information for Opaque (Class 5):

    + +
    + + + + + + + + + + + + + + + + + +
    Opaque Bit Field Description
    BitsMeaning

    0-7

    Length of ASCII tag in bytes.

    8-23

    Reserved (zero).

    +
    + +
    +
    + + + + + + + + + + + + + +
    Opaque Property Description
    ByteByteByteByte

    ASCII Tag

    +
    + +
    +
    + + + + + + + + + + +
    Field NameDescription

    ASCII Tag

    +

    This NUL-terminated string provides a description for the + opaque type. It is NUL-padded to a multiple of 8 bytes.

    +
    +
    + + +
    +

    Class specific information for Compound (Class 6):

    + +
    + + + + + + + + + + + + + + + + + +
    Compound Bit Field Description
    BitsMeaning

    0-15

    + Number of Members. This field contains the number of members + defined for the compound datatype. The member definitions are + listed in the Properties field of the data type message. +

    16-23

    Reserved (zero).

    +
    + + +

    The Properties field of a compound datatype is a list of the + member definitions of the compound datatype. The member definitions + appear one after another with no intervening bytes. The member types + are described with a (recursively) encoded datatype message.

    + +

    Note that the property descriptions are different for different + versions of the datatype message. Additionally note that the version 0 + datatype encoding is deprecated and has been replaced with later + encodings in versions of the HDF5 Library from the 1.4 release onward.

    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Compound Properties Description for Datatype + Version 1
    ByteByteByteByte

    Name
    +
    Byte Offset of Member
    DimensionalityReserved (zero)
    Dimension Permutation
    Reserved (zero)
    Dimension #1 Size (required)
    Dimension #2 Size (required)
    Dimension #3 Size (required)
    Dimension #4 Size (required)

    Member Type Message
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Name

    +

    This NUL-terminated string provides the name of the compound + datatype member. It is NUL-padded to a multiple of 8 bytes.

    +

    Byte Offset of Member

    +

    This is the byte offset of the member within the datatype.

    +

    Dimensionality

    +

    If set to zero, this field indicates a scalar member. If set + to a value greater than zero, this field indicates that the member + is an array of values. For array members, the size of the array is + indicated by the ‘Size of Dimension n’ field in this + message.

    +

    Dimension Permutation

    +

    This field was intended to allow an array field to have its + dimensions permuted, but this was never implemented. This field + should always be set to zero.

    +

    Dimension #n Size

    +

    This field is the size of a dimension of the array field as + stored in the file. The first dimension stored in the list of + dimensions is the slowest changing dimension and the last dimension + stored is the fastest changing dimension.

    +

    Member Type Message

    +

    This field is a datatype message describing the datatype of + the member.

    +
    +
    + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + +
    Compound Properties Description for Datatype + Version 2
    ByteByteByteByte

    Name
    +
    Byte Offset of Member

    Member Type Message
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Name

    +

    This NUL-terminated string provides the name of the compound + datatype member. It is NUL-padded to a multiple of 8 bytes.

    +

    Byte Offset of Member

    +

    This is the byte offset of the member within the datatype.

    +

    Member Type Message

    +

    This field is a datatype message describing the datatype of + the member.

    +
    +
    + + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + +
    Compound Properties Description for Datatype + Version 3
    ByteByteByteByte

    Name
    +
    Byte Offset of Member (variable size)

    Member Type Message
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Name

    + This NUL-terminated string provides the name of the compound + datatype member. It is not NUL-padded to a multiple of 8 bytes. +

    Byte Offset of Member

    This is the byte offset of the member within the + datatype. The field size is the minimum number of bytes necessary, + based on the size of the datatype element. For example, a datatype + element size of less than 256 bytes uses a 1 byte length, a + datatype element size of 256-65535 bytes uses a 2 byte length, and + so on.

    Member Type Message

    This field is a datatype message describing the + datatype of the member.

    +
    + + +
    +

    Class specific information for Reference (Class 7):

    + +
    + + + + + + + + + + + + + + + + + +
    Reference Bit Field Description
    BitsMeaning

    0-3

    + Type. This four-bit value contains the type of reference + described. The values defined are: + +

    + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Object Reference: A reference to another object in this + HDF5 file.
    1Dataset Region Reference: A reference to a region within + a dataset in this HDF5 file.
    2-15Reserved
    +

    4-23

    Reserved (zero).

    +
    + +

    There are no properties defined for the reference class.

    + + +
    +

    Class specific information for Enumeration (Class 8):

    + +
    + + + + + + + + + + + + + + + + + +
    Enumeration Bit Field Description
    BitsMeaning

    0-15

    + Number of Members. The number of name/value pairs defined + for the enumeration type. +

    16-23

    Reserved (zero).

    +
    + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + +
    Enumeration Property Description for Datatype + Versions 1 & 2
    ByteByteByteByte

    Base Type
    +

    Names
    +

    Values
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Base Type

    +

    Each enumeration type is based on some parent type, usually + an integer. The information for that parent type is described + recursively by this field.

    +

    Names

    +

    The name for each name/value pair. Each name is stored as a + null terminated ASCII string in a multiple of eight bytes. The + names are in no particular order.

    +

    Values

    +

    The list of values in the same order as the names. The values + are packed (no inter-value padding) and the size of each value is + determined by the parent type.

    +
    +
    + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + +
    Enumeration Property Description for Datatype + Version 3
    ByteByteByteByte

    Base Type
    +

    Names
    +

    Values
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Base Type

    +

    Each enumeration type is based on some parent type, usually + an integer. The information for that parent type is described + recursively by this field.

    +

    Names

    +

    + The name for each name/value pair. Each name is stored as a null + terminated ASCII string, not padded to a multiple of eight + bytes. The names are in no particular order. +

    +

    Values

    +

    The list of values in the same order as the names. The values + are packed (no inter-value padding) and the size of each value is + determined by the parent type.

    +
    +
    + + + +
    +

    Class specific information for Variable-Length (Class 9):

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Variable-Length Bit Field Description
    BitsMeaning

    0-3

    + Type. This four-bit value contains the type of + variable-length datatype described. The values defined are: + +

    + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Sequence: A variable-length sequence of any datatype. + Variable-length sequences do not have padding or character set + information.
    1String: A variable-length sequence of characters. + Variable-length strings have padding and character set + information.
    2-15Reserved
    +

    4-7

    + Padding type. (variable-length string only) This four-bit + value determines the type of padding used for variable-length + strings. The values are the same as for the string padding type, as + follows: +

    + + + + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Null terminate: A zero byte marks the end of a string and + is guaranteed to be present after converting a long string to a + short string. When converting a short string to a long string, + the value is padded with additional null characters as necessary. +
    1Null pad: Null characters are added to the end of the + value during conversion from a short string to a longer string. + Conversion from a long string to a shorter string simply + truncates the value.
    2Space pad: Space characters are added to the end of the + value during conversion from a short string to a longer string. + Conversion from a long string to a shorter string simply + truncates the value. This is the Fortran representation of the + string.
    3-15Reserved
    +

    + +

    This value is set to zero for variable-length sequences.

    8-11

    + Character Set. (variable-length string only) This four-bit + value specifies the character set to be used for encoding the + string: +

    + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0ASCII character set encoding
    1UTF-8 character set encoding
    2-15Reserved
    +

    + +

    This value is set to zero for variable-length sequences.

    12-23

    Reserved (zero).

    +
    + +
    +
    +
    + + + + + + + + + + + + + + +
    Variable-Length Property Description
    ByteByteByteByte

    Base Type
    +
    +
    + +
    +
    + + + + + + + + + + + +
    Field NameDescription

    Base Type

    +

    Each variable-length type is based on some parent type. The + information for that parent type is described recursively by this + field.

    +
    +
    + + +
    +

    Class specific information for Array (Class 10):

    + +

    There are no bit fields defined for the array class.

    + +

    Note that the dimension information defined in the property for + this datatype class is independent of dataspace information for a + dataset. The dimension information here describes the dimensionality of + the information within a data element (or a component of an element, if + the array datatype is nested within another datatype) and the dataspace + for a dataset describes the size and locations of the elements in a + dataset.

    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Array Property Description for Datatype Version 2
    ByteByteByteByte
    DimensionalityReserved (zero)
    Dimension #1 Size
    .
    .
    .
    Dimension #n Size
    Permutation Index #1
    .
    .
    .
    Permutation Index #n

    Base Type
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Dimensionality

    +

    This value is the number of dimensions that the array has.

    +

    Dimension #n Size

    +

    This value is the size of the dimension of the array as + stored in the file. The first dimension stored in the list of + dimensions is the slowest changing dimension and the last dimension + stored is the fastest changing dimension.

    +

    Permutation Index #n

    +

    This value is the index permutation used to map each + dimension from the canonical representation to an alternate axis + for each dimension. Currently, dimension permutations are not + supported, and these indices should be set to the index position + minus one. In other words, the first dimension should be set to 0, + the second dimension should be set to 1, and so on.

    +

    Base Type

    +

    Each array type is based on some parent type. The information + for that parent type is described recursively by this field.

    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Array Property Description for Datatype Version 3
    ByteByteByteByte
    DimensionalityThis space inserted + only to align table nicely
    Dimension #1 Size
    .
    .
    .
    Dimension #n Size

    Base Type
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Dimensionality

    +

    This value is the number of dimensions that the array has.

    +

    Dimension #n Size

    +

    This value is the size of the dimension of the array as + stored in the file. The first dimension stored in the list of + dimensions is the slowest changing dimension and the last dimension + stored is the fastest changing dimension.

    +

    Base Type

    +

    Each array type is based on some parent type. The information + for that parent type is described recursively by this field.

    +
    +
    + + + +
    +

    + IV.A.2.e. The Data Storage - Fill + Value (Old) Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Fill Value (old)
    Header Message Type: 0x0004
    Length: Varies
    Status: Optional; may not be repeated.
    Description:

    The fill value message stores a single data value + which is returned to the application when an uninitialized data + element is read from a dataset. The fill value is interpreted with + the same datatype as the dataset. If no fill value message is + present then a fill value of all zero bytes is assumed.

    +

    This fill value message is deprecated in favor of the + “new” fill value message (Message Type 0x0005) and is + only written to the file for forward compatibility with versions of + the HDF5 Library before the 1.6.0 version. Additionally, it only + appears for datasets with a user-defined fill value (as opposed to + the library default fill value or an explicitly set + “undefined” fill value).

    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + +
    Fill Value Message (Old)
    bytebytebytebyte
    Size

    Fill Value (optional, variable + size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + +
    Field NameDescription

    Size

    +

    This is the size of the Fill Value field in bytes.

    +

    Fill Value

    +

    The fill value. The bytes of the fill value are interpreted + using the same datatype as for the dataset.

    +
    +
    + + +
    +

    + IV.A.2.f. The Data Storage - Fill Value + Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Fill Value
    Header Message Type: 0x0005
    Length: Varies
    Status: Required for dataset objects; may + not be repeated.
    Description:The fill value message stores a single data value which is + returned to the application when an uninitialized data element is + read from a dataset. The fill value is interpreted with the same + datatype as the dataset.
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    Fill Value Message - Versions 1 & 2
    bytebytebytebyte
    VersionSpace Allocation TimeFill Value Write TimeFill Value Defined
    Size (optional)

    Fill Value (optional, variable + size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    The version number information is used for changes in the + format of the fill value message and is described here:

    + + + + + + + + + + + + + + + + + + + + + + +
    VersionDescription
    0Never used
    1Initial version of this message.
    2In this version, the Size and Fill Value fields are only + present if the Fill Value Defined field is set to 1.
    3This version packs the other fields in the message more + efficiently than version 2.
    +

    +

    +

    Space Allocation Time

    +

    When the storage space for the dataset’s raw data will + be allocated. The allowed values are:

    + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Not used.
    1Early allocation. Storage space for the entire dataset + should be allocated in the file when the dataset is created.
    2Late allocation. Storage space for the entire dataset + should not be allocated until the dataset is written to.
    3Incremental allocation. Storage space for the dataset + should not be allocated until the portion of the dataset is + written to. This is currently used in conjunction with chunked + data storage for datasets.
    +

    + +

    Fill Value Write Time

    +

    At the time that storage space for the dataset’s raw + data is allocated, this value indicates whether the fill value + should be written to the raw data storage elements. The allowed + values are:

    + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0On allocation. The fill value is always written to the + raw data storage when the storage space is allocated.
    1Never. The fill value should never be written to the raw + data storage.
    2Fill value written if set by user. The fill value will be + written to the raw data storage when the storage space is + allocated only if the user explicitly set the fill value. If the + fill value is the library default or is undefined, it will not be + written to the raw data storage.
    +

    + +

    Fill Value Defined

    +

    This value indicates if a fill value is defined for this + dataset. If this value is 0, the fill value is undefined. If this + value is 1, a fill value is defined for this dataset. For version 2 + or later of the fill value message, this value controls the + presence of the Size and Fill Value fields.

    +

    Size

    +

    This is the size of the Fill Value field in bytes. This field + is not present if the Version field is greater than 1, and the Fill + Value Defined field is set to 0.

    +

    Fill Value

    +

    The fill value. The bytes of the fill value are interpreted + using the same datatype as for the dataset. This field is not + present if the Version field is greater than 1, and the Fill Value + Defined field is set to 0.

    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + +
    Fill Value Message - Version 3
    bytebytebytebyte
    VersionFlagsThis space inserted + only to align table nicely
    Size (optional)

    Fill Value (optional, variable + size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    The version number information is used for changes in the + format of the fill value message and is described here:

    + + + + + + + + + + + + + + + + + + + + + + +
    VersionDescription
    0Never used
    1Initial version of this message.
    2In this version, the Size and Fill Value fields are only + present if the Fill Value Defined field is set to 1.
    3This version packs the other fields in the message more + efficiently than version 2.
    +

    + +

    Flags

    +

    This field packs the Space Allocation Time, Fill Value Write Time, + and fill value defined/undefined information into bit fields. The + allowed values are:

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    BitsDescription
    0-1Space Allocation Time, with the same values as versions 1 + and 2 of the message.
    2-3Fill Value Write Time, with the same values as versions 1 + and 2 of the message.
    4Fill Value Undefined, indicating that the fill value has + been marked as “undefined” for this dataset. Bits 4 + and 5 cannot both be set.
    5Fill Value Defined, with the same values as versions 1 + and 2 of the message. Bits 4 and 5 cannot both be set.
    6-7Reserved (zero).
    +

    + +

    Size

    +

    This is the size of the Fill Value field in bytes. This field + is not present if the Version field is greater than 1, and the Fill + Value Defined flag is set to 0.

    +

    Fill Value

    +

    The fill value. The bytes of the fill value are interpreted + using the same datatype as for the dataset. This field is not + present if the Version field is greater than 1, and the Fill Value + Defined flag is set to 0.

    +
    +
    + + +
    +

    + IV.A.2.g. The Link Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Link
    Header Message Type: 0x0006
    Length: Varies
    Status: Optional; may be repeated.
    Description:

    This message encodes the information for a link in a + group’s object header, when the group is storing its links + “compactly”, or in the group’s fractal heap, when + the group is storing its links “densely”.

    +

    + A group is storing its links compactly when the fractal heap + address in the Link Info + Message is set to the “undefined address” value. +

    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Link Message
    bytebytebytebyte
    VersionFlagsLink type (optional)This space inserted only to align + table nicely

    Creation Order (8 bytes, + optional)
    +
    Link Name Character Set (optional)Length of Link Name (variable size)This space inserted + only to align table nicely
    Link Name (variable size)

    Link Information (variable size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number for this message. This document + describes version 1.

    Flags

    This field contains information about the link and + controls the presence of other fields below.

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    BitsDescription
    0-1Determines the size of the Length of Link Name + field. + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0The size of the Length of Link Name field is + 1 byte. +
    1The size of the Length of Link Name field is + 2 bytes. +
    2The size of the Length of Link Name field is + 4 bytes. +
    3The size of the Length of Link Name field is + 8 bytes. +
    +
    2Creation Order Field Present: if set, the Creation + Order field is present. If not set, creation order information + is not stored for links in this group. +
    3Link Type Field Present: if set, the link is not a hard + link and the Link Type field is present. If not set, the + link is a hard link. +
    4Link Name Character Set Field Present: if set, the link + name is not represented with the ASCII character set and the Link + Name Character Set field is present. If not set, the link name + is represented with the ASCII character set. +
    5-7Reserved (zero).
    +

    Link type

    This is the link class type and can be one of the + following values:

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0A hard link (should never be stored in the file)
    1A soft link.
    2-63Reserved for future HDF5 internal use.
    64An external link.
    65-255Reserved, but available for user-defined link types.
    +

    + +

    + This field is present if bit 3 of Flags is set. +

    Creation Order

    This 64-bit value is an index of the link’s + creation time within the group. Values start at 0 when the group is + created and increment by one for each link added to the group. + Removing a link from a group does not change existing links’ + creation order field.

    +

    + This field is present if bit 2 of Flags is set. +

    Link Name Character Set

    This is the character set for encoding the + link’s name:

    + + + + + + + + + + + + + + + +
    ValueDescription
    0ASCII character set encoding (this should never be stored + in the file)
    1UTF-8 character set encoding
    +

    + +

    + This field is present if bit 4 of Flags is set. +

    Length of link name

    + This is the length of the link’s name. The size of this field + depends on bits 0 and 1 of Flags. +

    Link name

    This is the name of the link, non-NULL terminated.

    Link information

    + The format of this field depends on the link type. +

    +

    + For hard links, the field is formatted as follows: + +

    + + + + + +
    Size of Offsets bytes:The address of the object header for the + object that the link points to.
    +

    + +

    + For soft links, the field is formatted as follows: + +

    + + + + + + + + + +
    Bytes 1-2:Length of soft link value.
    Length of soft link value bytes:A non-NULL-terminated string storing the value of the + soft link.
    +

    + +

    + For external links, the field is formatted as follows: + +

    + + + + + + + + + +
    Bytes 1-2:Length of external link value.
    Length of external link value bytes:The first byte contains the version number in the upper 4 + bits and flags in the lower 4 bits for the external link. Both + version and flags are defined to be zero in this document. The + remaining bytes consist of two NULL-terminated strings, with no + padding between them. The first string is the name of the HDF5 + file containing the object linked to and the second string is the + full path to the object linked to, within the HDF5 file’s + group hierarchy.
    +

    + +

    + For user-defined links, the field is formatted as follows: + +

    + + + + + + + + + +
    Bytes 1-2:Length of user-defined data.
    Length of user-defined link value bytes:The data supplied for the user-defined link type.
    +

    +
    + +
    +

    + IV.A.2.h. The Data Storage - + External Data Files Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: External Data Files
    Header Message Type: 0x0007
    Length: Varies
    Status: Optional; may not be repeated.
    Description:The external data storage message indicates that the data + for an object is stored outside the HDF5 file. The filename of the + object is stored as a Universal Resource Location (URL) of the + actual filename containing the data. An external file list record + also contains the byte offset of the start of the data within the + file and the amount of space reserved in the file for that data.
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    External File List Message
    bytebytebytebyte
    VersionReserved (zero)
    Allocated SlotsUsed Slots

    Heap AddressO
    +

    Slot Definitions...
    +
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    The version number information is used for changes in the + format of External Data Storage Message and is described here:

    + + + + + + + + + + + + + +
    VersionDescription
    0Never used.
    1The current version used by the library.
    +

    + +

    Allocated Slots

    +

    The total number of slots allocated in the message. Its value + must be at least as large as the value contained in the Used Slots + field. (The current library simply uses the number of Used Slots + for this message)

    +

    Used Slots

    +

    The number of initial slots which contain valid information.

    +

    Heap Address

    +

    This is the address of a local heap which contains the names + for the external files (The local heap information can be found in + Disk Format Level 1D in this document). The name at offset zero in + the heap is always the empty string.

    +

    Slot Definitions

    +

    The slot definitions are stored in order according to the + array addresses they represent.

    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    External File List Slot
    bytebytebytebyte

    Name Offset in Local HeapL
    +

    Offset in External Data FileL
    +

    Data Size in External FileL
    +
    + + + + + + +
     (Items marked with an ‘L’ in the + above table are of the size specified in “Size of + Lengths” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Name Offset in Local Heap

    +

    + The byte offset within the local name heap for the name of the + file. File names are stored as a URL which has a protocol name, a + host name, a port number, and a file name: + protocol://host:port/file + + . If the protocol is omitted then “file:” is assumed. + If the port number is omitted then a default port for that protocol + is used. If both the protocol and the port number are omitted then + the colon can also be omitted. If the double slash and host name + are omitted then “localhost” is assumed. The file name + is the only mandatory part, and if the leading slash is missing + then it is relative to the application’s current working + directory (the use of relative names is not recommended). +

    +

    Offset in External Data File

    +

    This is the byte offset to the start of the data in the + specified file. For files that contain data for a single dataset + this will usually be zero.

    +

    Data Size in External File

    +

    This is the total number of bytes reserved in the specified + file for raw data storage. For a file that contains exactly one + complete dataset which is not extendable, the size will usually be + the exact size of the dataset. However, by making the size larger + one allows HDF5 to extend the dataset. The size can be set to a + value larger than the entire file since HDF5 will read zeroes past + the end of the file without failing.

    +
    +
    + + +
    +

    + IV.A.2.i. The Data Storage - Layout Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Data Storage - + Layout
    Header Message Type: 0x0008
    Length: Varies
    Status: Required for datasets; may not be + repeated.
    Description:Data layout describes how the elements of a + multi-dimensional array are stored in the HDF5 file. Three types of + data layout are supported: +
      +
    1. Contiguous: The array is stored in one contiguous area of + the file. This layout requires that the size of the array be + constant: data manipulations such as chunking, compression, + checksums, or encryption are not permitted. The message stores the + total storage size of the array. The offset of an element from the + beginning of the storage area is computed as in a C array.
    2. Chunked: The array domain is regularly decomposed into + chunks, and each chunk is allocated and stored separately. This + layout supports arbitrary element traversals, compression, + encryption, and checksums. (these features are described in other + messages). The message stores the size of a chunk instead of the + size of the entire array; the storage size of the entire array can + be calculated by traversing the B-tree that stores the chunk + addresses.
    3. Compact: The array is stored in one contiguous block, as + part of this object header message.
    +
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Data Layout Message (Versions 1 and 2)
    bytebytebytebyte
    VersionDimensionalityLayout ClassReserved (zero)
    Reserved (zero)

    Data AddressO (optional)
    +
    Dimension 0 Size
    Dimension 1 Size
    ...
    Dimension #n Size
    Dataset Element Size (optional)
    Compact Data Size (optional)

    Compact Data... (variable size, + optional)
    +
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    The version number information is used for changes in the + format of the data layout message and is described here:

    + + + + + + + + + + + + + + + + + + + + +
    VersionDescription
    0Never used.
    1Used by version 1.4 and before of the library to encode + layout information. Data space is always allocated when the data + set is created.
    2Used by version 1.6.x of the library to encode layout + information. Data space is allocated only when it is necessary.
    +

    +

    Dimensionality

    An array has a fixed dimensionality. This field + specifies the number of dimension size fields later in the message. + The value stored for chunked storage is 1 greater than the number + of dimensions in the dataset’s dataspace. For example, 2 is + stored for a 1 dimensional dataset.

    Layout Class

    The layout class specifies the type of storage for + the data and how the other fields of the layout message are to be + interpreted.

    + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Compact Storage
    1Contiguous Storage
    2Chunked Storage
    +

    Data Address

    For contiguous storage, this is the address of the + raw data in the file. For chunked storage this is the address of + the v1 B-tree that is used to look up the addresses of the chunks. + This field is not present for compact storage. If the version for + this message is greater than 1, the address may have the + “undefined address” value, to indicate that storage has + not yet been allocated for this array.

    Dimension #n Size

    For contiguous and compact storage the dimensions + define the entire size of the array while for chunked storage they + define the size of a single chunk. In all cases, they are in units + of array elements (not bytes). The first dimension stored in the + list of dimensions is the slowest changing dimension and the last + dimension stored is the fastest changing dimension.

    Dataset Element Size

    The size of a dataset element, in bytes. This field + is only present for chunked storage.

    Compact Data Size

    This field is only present for compact data storage. + It contains the size of the raw data for the dataset array, in + bytes.

    Compact Data

    This field is only present for compact data storage. + It contains the raw data for the dataset array.

    +
    + +
    +

    Version 3 of this message re-structured the format into specific + properties that are required for each layout class.

    + + +
    + + + + + + + + + + + + + + + + + + + +
    + Data Layout Message (Version 3) +
    bytebytebytebyte
    VersionLayout ClassThis space inserted + only to align table nicely

    Properties (variable size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    +

    The version number information is used for changes in the + format of layout message and is described here:

    + + + + + + + + + + +
    VersionDescription
    3Used by the version 1.6.3 and later of the library to + store properties for each layout class.
    +

    +

    Layout Class

    The layout class specifies the type of storage for + the data and how the other fields of the layout message are to be + interpreted.

    + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    0Compact Storage
    1Contiguous Storage
    2Chunked Storage
    +

    Properties

    This variable-sized field encodes information + specific to each layout class and is described below. If there is + no property information specified for a layout class, the size of + this field is zero bytes.

    +
    + +
    +

    Class-specific information for compact layout (Class 0): (Note: + The dimensionality information is in the Dataspace message)

    + + +
    + + + + + + + + + + + + + + + + + + +
    Compact Storage Property Description
    bytebytebytebyte
    SizeThis space inserted + only to align table nicely

    Raw Data... (variable size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + +
    Field NameDescription

    Size

    This field contains the size of the raw data for the + dataset array, in bytes.

    Raw Data

    This field contains the raw data for the dataset + array.

    +
    + + +
    +

    Class-specific information for contiguous layout (Class 1): + (Note: The dimensionality information is in the Dataspace message)

    + + +
    + + + + + + + + + + + + + + + + + +
    Contiguous Storage Property Description
    bytebytebytebyte

    AddressO
    +

    SizeL
    +
    + + + + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + +
    Field NameDescription

    Address

    This is the address of the raw data in the file. The + address may have the “undefined address” value, to + indicate that storage has not yet been allocated for this array.

    Size

    This field contains the size allocated to store the + raw data, in bytes.

    +
    + + +
    +

    Class-specific information for chunked layout (Class 2):

    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Chunked Storage Property Description
    bytebytebytebyte
    DimensionalityThis space inserted + only to align table nicely

    AddressO
    +
    Dimension 0 Size
    Dimension 1 Size
    ...
    Dimension #n Size
    Dataset Element Size
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Dimensionality

    A chunk has a fixed dimensionality. This field + specifies the number of dimension size fields later in the message.

    Address

    This is the address of the v1 B-tree that is used to + look up the addresses of the chunks that actually store portions of + the array data. The address may have the “undefined + address” value, to indicate that storage has not yet been + allocated for this array.

    Dimension #n Size

    These values define the dimension size of a single + chunk, in units of array elements (not bytes). The first dimension + stored in the list of dimensions is the slowest changing dimension + and the last dimension stored is the fastest changing dimension.

    Dataset Element Size

    The size of a dataset element, in bytes.

    +
    + +
    +

    + IV.A.2.j. The Bogus Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Bogus
    Header Message Type: 0x0009
    Length: 4 bytes
    Status: For testing only; should never be + stored in a valid file.
    Description:This message is used for testing the HDF5 Library’s + response to an “unknown” message type and should never + be encountered in a valid HDF5 file.
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + +
    Bogus Message
    bytebytebytebyte
    Bogus Value
    +
    + +
    +
    + + + + + + + + + + +
    Field NameDescription

    Bogus Value

    +

    + This value should always be: + 0xdeadbeef + . +

    +
    +
    + +
    +

    + IV.A.2.k. The Group Info Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Group Info
    Header Message Type: 0x000A
    Length: Varies
    Status: Optional; may not be repeated.
    Description:

    + This message stores information for the constants defining a + “new style” group’s behavior. Constant + information will be stored in this message and variable information + will be stored in the Link Info + message. +

    +

    Note: the “estimated entry” information below is + used when determining the size of the object header for the group + when it is created.

    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    Group Info Message
    bytebytebytebyte
    VersionFlagsLink Phase Change: Maximum Compact Value (optional)
    Link Phase Change: Minimum Dense Value (optional)Estimated Number of Entries (optional)
    Estimated Link Name Length of Entries (optional)This space inserted + only to align table nicely
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number for this message. This document + describes version 0.

    Flags

    This is the group information flag with the following + definition:

    + + + + + + + + + + + + + + + + + + +
    BitDescription
    0If set, link phase change values are stored.
    1If set, the estimated entry information is non-default + and is stored.
    2-7Reserved
    +

    Link Phase Change: Maximum Compact Value

    This is the maximum number of links to store + “compactly” (in the group’s object header).

    +

    + This field is present if bit 0 of Flags is set. +

    Link Phase Change: Minimum Dense Value

    + This is the minimum number of links to store “densely” + (in the group’s fractal heap). The fractal heap’s + address is located in the Link Info + message. +

    +

    + This field is present if bit 0 of Flags is set. +

    Estimated Number of Entries

    This is the estimated number of entries in groups.

    +

    + If this field is not present, the default value of + 4 + will be used for the estimated number of group entries. +

    +

    + This field is present if bit 1 of Flags is set. +

    Estimated Link Name Length of Entries

    This is the estimated length of entry name.

    +

    + If this field is not present, the default value of + 8 + will be used for the estimated link name length of group entries. +

    +

    + This field is present if bit 1 of Flags is set. +

    +
    +

    + +
    +

    + IV.A.2.l. The Data Storage - Filter + Pipeline Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Data Storage - + Filter Pipeline
    Header Message Type: 0x000B
    Length: Varies
    Status: Optional; may not be repeated.
    Description:

    This message describes the filter pipeline which + should be applied to the data stream by providing filter + identification numbers, flags, a name, and client data.

    +

    This message may be present in the object headers of both + dataset and group objects. For datasets, it specifies the filters + to apply to raw data. For groups, it specifies the filters to apply + to the group’s fractal heap. Currently, only datasets using + chunked data storage use the filter pipeline on their raw data.

    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + +
    Filter Pipeline Message - Version 1
    bytebytebytebyte
    VersionNumber of FiltersReserved (zero)
    Reserved (zero)

    Filter Description List (variable + size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number for this message. This table + describes version 1.

    Number of Filters

    The total number of filters described in this + message. The maximum possible number of filters in a message is 32.

    Filter Description List

    A description of each filter. A filter description + appears in the next table.

    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Filter Description
    bytebytebytebyte
    Filter Identification ValueName Length
    FlagsNumber Client Data Values

    Name (variable size, optional)
    +

    Client Data (variable size, + optional)
    +
    Padding (variable size, optional)
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Filter Identification Value

    +

    + This value, often referred to as a filter identifier, is designed + to be a unique identifier for the filter. Values from zero through + 32,767 are reserved for filters supported by The HDF Group in the + HDF5 Library and for filters requested and supported by third + parties. Filters supported by The HDF Group are documented + immediately below. Information on 3rd-party filters can be found at + The HDF Group’s + Contributions page. +

    + +

    + To request a filter identifier, please contact The HDF + Group’s Help Desk at The HDF Group Help Desk. + You will be asked to provide the following information: +

    +
      +
    1. Contact information for the developer requesting the new + identifier
    2. A short description of the new filter
    3. Links to any relevant information, including licensing + information
    +

    Values from 32768 to 65535 are reserved for non-distributed + uses (for example, internal company usage) or for application usage + when testing a feature. The HDF Group does not track or document + the use of the filters with identifiers from this range.

    + +

    The filters currently in library version 1.8.0 are listed + below:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    IdentificationNameDescription
    0N/AReserved
    1deflateGZIP deflate compression
    2shuffleData element shuffling
    3fletcher32Fletcher32 checksum
    4szipSZIP compression
    5nbitN-bit packing
    6scaleoffsetScale and offset encoded values
    +

    +

    Name Length

    Each filter has an optional null-terminated ASCII + name and this field holds the length of the name including the null + termination padded with nulls to be a multiple of eight. If the + filter has no name then a value of zero is stored in this field.

    Flags

    The flags indicate certain properties for a filter. + The bit values defined so far are:

    + + + + + + + + + + + + + + + +
    BitDescription
    0If set then the filter is an optional filter. During + output, if an optional filter fails it will be silently skipped + in the pipeline.
    1-15Reserved (zero)
    +

    Number of Client Data Values

    + Each filter can store integer values to control how the filter + operates. The number of entries in the Client Data array + is stored in this field. +

    Name

    + If the Name Length field is non-zero then it will contain + the size of this field, padded to a multiple of eight. This field + contains a null-terminated, ASCII character string to serve as a + comment/name for the filter. +

    Client Data

    + This is an array of four-byte integers which will be passed to the + filter function. The Client Data Number of Values + determines the number of elements in the array. +

    Padding

    Four bytes of zeroes are added to the message at this + point if the Client Data Number of Values field contains an odd + number.

    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + +
    Filter Pipeline Message - Version 2
    bytebytebytebyte
    VersionNumber of FiltersThis space inserted + only to align table nicely

    Filter Description List (variable + size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number for this message. This table + describes version 2.

    Number of Filters

    The total number of filters described in this + message. The maximum possible number of filters in a message is 32.

    Filter Description List

    A description of each filter. A filter description + appears in the next table.

    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Filter Description
    bytebytebytebyte
    Filter Identification ValueName Length (optional)
    FlagsNumber Client Data Values

    Name (variable size, optional)
    +

    Client Data (variable size, + optional)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Filter Identification Value

    +

    + This value, often referred to as a filter identifier, is designed + to be a unique identifier for the filter. Values from zero through + 32,767 are reserved for filters supported by The HDF Group in the + HDF5 Library and for filters requested and supported by third + parties. Filters supported by The HDF Group are documented + immediately below. Information on 3rd-party filters can be found at + The HDF Group’s + Contributions page. +

    + +

    + To request a filter identifier, please contact The HDF + Group’s Help Desk at The HDF Group Help Desk. + You will be asked to provide the following information: +

    +
      +
    1. Contact information for the developer requesting the new + identifier
    2. A short description of the new filter
    3. Links to any relevant information, including licensing + information
    +

    Values from 32768 to 65535 are reserved for non-distributed + uses (for example, internal company usage) or for application usage + when testing a feature. The HDF Group does not track or document + the use of the filters with identifiers from this range.

    + +

    The filters currently in library version 1.8.0 are listed + below:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    IdentificationNameDescription
    0N/AReserved
    1deflateGZIP deflate compression
    2shuffleData element shuffling
    3fletcher32Fletcher32 checksum
    4szipSZIP compression
    5nbitN-bit packing
    6scaleoffsetScale and offset encoded values
    +

    +

    Name Length

    Each filter has an optional null-terminated ASCII + name and this field holds the length of the name including the null + termination padded with nulls to be a multiple of eight. If the + filter has no name then a value of zero is stored in this field.

    +

    + Filters with IDs less than 256 (in other words, filters that are + defined in this format documentation) do not store the Name + Length or Name fields. +

    Flags

    The flags indicate certain properties for a filter. + The bit values defined so far are:

    + + + + + + + + + + + + + + + +
    BitDescription
    0If set then the filter is an optional filter. During + output, if an optional filter fails it will be silently skipped + in the pipeline.
    1-15Reserved (zero)
    +

    Number of Client Data Values

    + Each filter can store integer values to control how the filter + operates. The number of entries in the Client Data array + is stored in this field. +

    Name

    + If the Name Length field is non-zero then it will contain + the size of this field, not padded to a multiple of eight. + This field contains a non-null-terminated, ASCII character + string to serve as a comment/name for the filter. +

    +

    + Filters that are defined in this format documentation such as + deflate and shuffle do not store the Name Length or Name + fields. +

    Client Data

    + This is an array of four-byte integers which will be passed to the + filter function. The Client Data Number of Values + determines the number of elements in the array. +

    +
    + +
    +

    + IV.A.2.m. The Attribute Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Attribute
    Header Message Type: 0x000C
    Length: Varies
    Status: Optional; may be repeated.
    Description:

    + The Attribute message is used to store objects in the HDF5 + file which are used as attributes, or “metadata” about + the current object. An attribute is a small dataset; it has a name, + a datatype, a dataspace, and raw data. Since attributes are stored + in the object header, they should be relatively small (in other + words, less than 64KB). They can be associated with any type of + object which has an object header (groups, datasets, or committed + (named) datatypes). +

    +

    + In 1.8.x versions of the library, attributes can be larger than + 64KB. See the + “Special Issues” section of the Attributes chapter in + the HDF5 User Guide for more information. +

    +

    Note: Attributes on an object must have unique names: the + HDF5 Library currently enforces this by causing the creation of an + attribute with a duplicate name to fail. Attributes on different + objects may have the same name, however.

    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Attribute Message (Version 1)
    bytebytebytebyte
    VersionReserved (zero)Name Size
    Datatype SizeDataspace Size

    Name (variable size)
    +

    Datatype (variable size)
    +

    Dataspace (variable size)
    +

    Data (variable size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number information is used for changes in + the format of the attribute message and is described here:

    + + + + + + + + + + + + + + + +
    VersionDescription
    0Never used.
    1Used by the library before version 1.6 to encode + attribute message. This version does not support shared + datatypes.
    +

    Name Size

    + The length of the attribute name in bytes including the null + terminator. Note that the Name field below may contain + additional padding not represented by this field. +

    Datatype Size

    + The length of the datatype description in the Datatype + field below. Note that the Datatype field may contain + additional padding not represented by this field. +

    Dataspace Size

    + The length of the dataspace description in the Dataspace + field below. Note that the Dataspace field may contain + additional padding not represented by this field. +

    Name

    The null-terminated attribute name. This field is + padded with additional null characters to make it a multiple of + eight bytes.

    Datatype

    The datatype description follows the same format as + described for the datatype object header message. This field is + padded with additional zero bytes to make it a multiple of eight + bytes.

    Dataspace

    The dataspace description follows the same format as + described for the dataspace object header message. This field is + padded with additional zero bytes to make it a multiple of eight + bytes.

    Data

    + The raw data for the attribute. The size is determined from the + datatype and dataspace descriptions. This field is not + padded with additional bytes. +

    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Attribute Message (Version 2)
    bytebytebytebyte
    VersionFlagsName Size
    Datatype SizeDataspace Size

    Name (variable size)
    +

    Datatype (variable size)
    +

    Dataspace (variable size)
    +

    Data (variable size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number information is used for changes in + the format of the attribute message and is described here:

    + + + + + + + + + + +
    VersionDescription
    2Used by versions 1.6.x and after of the library to encode + attribute messages. This version supports shared datatypes. The + fields of name, datatype, and dataspace are not padded with + additional bytes of zero.
    +

    Flags

    This bit field contains extra information about + interpreting the attribute message:

    + + + + + + + + + + + + + + + +
    BitDescription
    0If set, datatype is shared.
    1If set, dataspace is shared.
    +

    Name Size

    The length of the attribute name in bytes including + the null terminator.

    Datatype Size

    + The length of the datatype description in the Datatype + field below. +

    Dataspace Size

    + The length of the dataspace description in the Dataspace + field below. +

    Name

    + The null-terminated attribute name. This field is not + padded with additional bytes. +

    Datatype

    The datatype description follows the same format as + described for the datatype object header message.

    +

    + If the Flag field indicates this attribute’s + datatype is shared, this field will contain a “shared + message” encoding instead of the datatype encoding. +

    +

    + This field is not padded with additional bytes. +

    Dataspace

    The dataspace description follows the same format as + described for the dataspace object header message.

    +

    + If the Flag field indicates this attribute’s + dataspace is shared, this field will contain a “shared + message” encoding instead of the dataspace encoding. +

    +

    + This field is not padded with additional bytes. +

    Data

    The raw data for the attribute. The size is + determined from the datatype and dataspace descriptions.

    +

    + This field is not padded with additional zero bytes. +

    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Attribute Message (Version 3)
    bytebytebytebyte
    VersionFlagsName Size
    Datatype SizeDataspace Size
    Name Character Set EncodingThis space inserted + only to align table nicely

    Name (variable size)
    +

    Datatype (variable size)
    +

    Dataspace (variable size)
    +

    Data (variable size)
    +
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number information is used for changes in + the format of the attribute message and is described here:

    + + + + + + + + + + +
    VersionDescription
    3Used by versions 1.8.x and after of the library to encode + attribute messages. This version supports attributes with + non-ASCII names.
    +

    Flags

    This bit field contains extra information about + interpreting the attribute message:

    + + + + + + + + + + + + + + + +
    BitDescription
    0If set, datatype is shared.
    1If set, dataspace is shared.
    +

    Name Size

    The length of the attribute name in bytes including + the null terminator.

    Datatype Size

    + The length of the datatype description in the Datatype + field below. +

    Dataspace Size

    + The length of the dataspace description in the Dataspace + field below. +

    Name Character Set Encoding

    The character set encoding for the attribute’s + name:

    + + + + + + + + + + + + + + + +
    ValueDescription
    0ASCII character set encoding
    1UTF-8 character set encoding
    +

    Name

    + The null-terminated attribute name. This field is not + padded with additional bytes. +

    Datatype

    The datatype description follows the same format as + described for the datatype object header message.

    +

    + If the Flag field indicates this attribute’s + datatype is shared, this field will contain a “shared + message” encoding instead of the datatype encoding. +

    +

    + This field is not padded with additional bytes. +

    Dataspace

    The dataspace description follows the same format as + described for the dataspace object header message.

    +

    + If the Flag field indicates this attribute’s + dataspace is shared, this field will contain a “shared + message” encoding instead of the dataspace encoding. +

    +

    + This field is not padded with additional bytes. +

    Data

    The raw data for the attribute. The size is + determined from the datatype and dataspace descriptions.

    +

    + This field is not padded with additional zero bytes. +

    +
    + +
    +

    + IV.A.2.n. The Object Comment Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Object Comment
    Header Message Type: 0x000D
    Length: Varies
    Status: Optional; may not be repeated.
    Description:The object comment is designed to be a short description of + an object. An object comment is a sequence of non-zero (that is, non-\0) + ASCII characters with no other formatting included by the library. +
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + +
    Name Message
    bytebytebytebyte

    Comment (variable size)
    +
    +
    + +
    +
    + + + + + + + + + + +
    Field NameDescription

    Name

    A null terminated ASCII character string.

    +
    + +
    +

    + IV.A.2.o. The Object + Modification Time (Old) Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Object Modification + Time (Old)
    Header Message Type: 0x000E
    Length: Fixed
    Status: Optional; may not be repeated.
    Description:

    The object modification date and time is a timestamp + which indicates (using ISO-8601 date and time format) the last + modification of an object. The time is updated when any object + header message changes according to the system clock where the + change was posted. All fields of this message should be interpreted + as coordinated universal time (UTC).

    +

    + This modification time message is deprecated in favor of the + “new” Object + Modification Time message and is no longer written to the file in + versions of the HDF5 Library after the 1.6.0 version. +

    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Modification Time Message
    bytebytebytebyte
    Year
    MonthDay of Month
    HourMinute
    SecondReserved
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Year

    + The four-digit year as an ASCII string. For example, + 1998 + . +

    Month

    + The month number as a two digit ASCII string where January is + 01 + and December is + 12 + . +

    Day of Month

    + The day number within the month as a two digit ASCII string. The + first day of the month is + 01 + . +

    Hour

    + The hour of the day as a two digit ASCII string where midnight is + 00 + and 11:00pm is + 23 + . +

    Minute

    + The minute of the hour as a two digit ASCII string where the first + minute of the hour is + 00 + and the last is + 59 + . +

    Second

    + The second of the minute as a two digit ASCII string where the + first second of the minute is + 00 + and the last is + 59 + . +

    Reserved

    This field is reserved and should always be zero.

    +
    + +
    +

    + IV.A.2.p. The Shared Message Table + Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Shared Message Table
    Header Message Type: 0x000F
    Length: Fixed
    Status: Optional; may not be repeated.
    Description:This message is used to locate the table of shared object + header message (SOHM) indexes. Each index consists of information to + find the shared messages from either the heap or object header. This + message is only found in the superblock extension. +
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    Shared Message Table Message
    bytebytebytebyte
    VersionThis space inserted + only to align table nicely

    Shared Object Header Message Table + AddressO
    +
    Number of IndicesThis space inserted + only to align table nicely
    + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number for this message. This document + describes version 0.

    Shared Object Header Message Table Address

    This field is the address of the master table for + shared object header message indexes.

    Number of Indices

    This field is the number of indices in the master + table.

    +
    + +
    +

    + IV.A.2.q. The Object Header + Continuation Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Object Header + Continuation
    Header Message Type: 0x0010
    Length: Fixed
    Status: Optional; may be repeated.
    Description:The object header continuation is the location in the file + of a block containing more header messages for the current data + object. This can be used when header blocks become too large or are + likely to change over time.
    Format of Data: See the tables below.
    +
    + + +
    + + + + + + + + + + + + + + + + + +
    Object Header Continuation Message
    bytebytebytebyte

    OffsetO
    +

    LengthL
    +
    + + + + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    + +
    + +
    +
    + + + + + + + + + + + + + + + +
    Field NameDescription

    Offset

    This value is the address in the file where the + header continuation block is located.

    Length

    This value is the length in bytes of the header + continuation block in the file.

    +
    +
    + +

    The format of the header continuation block that this message + points to depends on the version of the object header that the message + is contained within.

    + +

    + Continuation blocks for version 1 object headers have no special + formatting information; they are merely a list of object header message + info sequences (type, size, flags, reserved bytes and data for each + message sequence). See the description of Version 1 Data Object Header Prefix. +

    + +

    + Continuation blocks for version 2 object headers do have + special formatting information as described here (see also the + description of Version 2 Data + Object Header Prefix.): +

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Version 2 Object Header Continuation Block
    bytebytebytebyte
    Signature
    Header Message Type #1Size of Header Message Data #1Header Message #1 Flags
    Header Message #1 Creation Order (optional)This space inserted + only to align table nicely

    Header Message Data #1
    +
    .
    .
    .
    Header Message Type #nSize of Header Message Data #nHeader Message #n Flags
    Header Message #n Creation Order (optional)This space inserted + only to align table nicely

    Header Message Data #n
    +
    Gap (optional, variable size)
    Checksum
    +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Signature

    +

    + The ASCII character string “ + OCHK + ” is used to indicate the beginning of an object header + continuation block. This gives file consistency checking utilities + a better chance of reconstructing a damaged file. +

    +

    Header Message #n Type

    +

    Same format as version 1 of the object header, described + above.

    +

    Size of Header Message #n Data

    +

    Same format as version 1 of the object header, described + above.

    +

    Header Message #n Flags

    +

    Same format as version 1 of the object header, described + above.

    +

    Header Message #n Creation Order

    +

    This field stores the order that a message of a given type + was created in.

    +

    + This field is present if bit 2 of flags is set. +

    +

    Header Message #n Data

    +

    Same format as version 1 of the object header, described + above.

    +

    Gap

    +

    A gap in an object header chunk is inferred by the end of the + messages for the chunk before the beginning of the chunk’s + checksum. Gaps are always smaller than the size of an object header + message prefix (message type + message size + message flags).

    +

    Gaps are formed when a message (typically an attribute + message) in an earlier chunk is deleted and a message from a later + chunk that does not quite fit into the free space is moved into the + earlier chunk.

    +

    Checksum

    +

    This is the checksum for the object header chunk.

    +
    +
    + +
    +

    + IV.A.2.r. The Symbol Table Message +

    + + +
    + + + + + + + + + + + + + + + + + + + + +
    Header Message Name: Symbol Table Message
    Header Message Type: 0x0011
    Length: Fixed
    Status: Required for “old + style” groups; may not be repeated.
    Description:Each “old style” group has a v1 B-tree and a + local heap for storing symbol table entries, which are located with + this message.
    Format of data: See the tables below.
    +
    + +
    + + -
    -

    Class-specific information for chunked layout (Class 2):

    - - -
    -
    + Symbol Table Message +
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Chunked Storage Property Description -
    bytebytebytebyte
    DimensionalityThis space inserted only to align table nicely

    AddressO

    Dimension 0 Size
    Dimension 1 Size
    ...
    Dimension #n Size
    Dataset Element Size
    + + byte + byte + byte + byte + - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    +
    v1 B-tree AddressO
    +
    + -
    + +
    Local Heap AddressO
    +
    + + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Dimensionality

    A chunk has a fixed dimensionality. This field specifies - the number of dimension size fields later in the message.

    Address

    This is the address of the v1 B-tree that is used to look up the - addresses of the chunks that actually store portions of the array - data. The address may have the “undefined address” value, to - indicate that storage has not yet been allocated for this array.

    Dimension #n Size

    These values define the dimension size of a single chunk, in - units of array elements (not bytes). The first dimension stored in - the list of dimensions is the slowest changing dimension and the - last dimension stored is the fastest changing dimension. -

    -

    Dataset Element Size

    The size of a dataset element, in bytes. -

    -
    -
    + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + +

    -

    IV.A.2.j. The Bogus Message

    +
    + + + + + - -
    +
    + + + + + + + + +
    Field NameDescription

    v1 B-tree Address

    This value is the address of the v1 B-tree containing + the symbol table entries for the group.

    Local Heap Address

    This value is the address of the local heap + containing the link names for the symbol table entries for the + group.

    +
    + +
    +

    + IV.A.2.s. The Object Modification + Time Message +

    + + +
    - - - - - - - -
    Header Message Name: Bogus
    Header Message Type: 0x0009
    Length: 4 bytes
    Status: For testing only; should never - be stored in a valid file.
    Description:This message is used for testing the HDF5 Library’s - response to an “unknown” message type and should - never be encountered in a valid HDF5 file.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - -
    - Bogus Message -
    bytebytebytebyte
    Bogus Value
    -
    + + Header Message Name: Object Modification + Time + + + Header Message Type: 0x0012 + + + Length: Fixed + + + Status: Optional; may not be repeated. + + + Description: + The object modification time is a timestamp which indicates + the time of the last modification of an object. The time is updated + when any object header message changes according to the system clock + where the change was posted. + + + Format of Data: See the tables below. + + + + -
    -
    - - - - - - - - - - -
    Field NameDescription

    Bogus Value

    -

    This value should always be: 0xdeadbeef.

    -
    -
    +
    + + + + + + + + + + + + + + + + + + +
    Modification Time Message
    bytebytebytebyte
    VersionReserved (zero)
    Seconds After UNIX Epoch
    +

    -

    IV.A.2.k. The Group Info Message -

    +
    + + + + + - -
    +
    + + + + + + + + +
    Field NameDescription

    Version

    The version number is used for changes in the format + of Object Modification Time and is described here:

    + + + + + + + + + + + + + + + +
    VersionDescription
    0Never used.
    1Used by Version 1.6.1 and after of the library to encode + time. In this version, the time is the seconds after Epoch.
    +

    Seconds After UNIX Epoch

    A 32-bit unsigned integer value that stores the + number of seconds since 0 hours, 0 minutes, 0 seconds, January 1, + 1970, Coordinated Universal Time.

    +
    + +
    +

    + IV.A.2.t. The B-tree ‘K’ + Values Message +

    + + +
    - - - - - - - -
    Header Message Name: Group Info
    Header Message Type: 0x000A
    Length: Varies
    Status: Optional; may not be - repeated.
    Description:

    This message stores information for the constants defining - a “new style” group’s behavior. Constant - information will be stored in this message and variable - information will be stored in the - Link Info message.

    -

    Note: the “estimated entry” information below is - used when determining the size of the object header for the - group when it is created.

    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + +
    - Group Info Message -
    bytebytebytebyte
    VersionFlagsLink Phase Change: Maximum Compact Value (optional)
    Link Phase Change: Minimum Dense Value (optional)Estimated Number of Entries (optional)
    Estimated Link Name Length of Entries (optional)This space inserted only to align table nicely
    Header Message Name: B-tree + ‘K’ Values
    Header Message Type: 0x0013
    Length: Fixed
    Status: Optional; may not be repeated.
    Description:This message records non-default ‘K’ values + for internal and leaf nodes of a group or indexed storage v1 + B-trees. This message is only found in the superblock + extension. +
    Format of Data: See the tables below.
    + + - -
    +
    + + -
    -
    -
    B-tree ‘K’ Values Message
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + -
    Field NameDescription

    Version

    The version number for this message. This document describes version 0.

    -

    Flags

    This is the group information flag with the following definition: - - - - - - - - - - - - - - - - - - - -
    BitDescription
    0If set, link phase change values are stored. -
    1If set, the estimated entry information is non-default - and is stored. -
    2-7Reserved

    -

    Link Phase Change: Maximum Compact Value

    The is the maximum number of links to store “compactly” (in - the group’s object header).

    -

    This field is present if bit 0 of Flags is set.

    -

    Link Phase Change: Minimum Dense Value

    This is the minimum number of links to store “densely” (in - the group’s fractal heap). The fractal heap’s address is - located in the Link Info - message.

    -

    This field is present if bit 0 of Flags is set.

    -

    Estimated Number of Entries

    This is the estimated number of entries in groups.

    -

    If this field is not present, the default value of 4 - will be used for the estimated number of group entries.

    -

    This field is present if bit 1 of Flags is set.

    -

    Estimated Link Name Length of Entries

    This is the estimated length of entry name.

    -

    If this field is not present, the default value of 8 - will be used for the estimated link name length of group entries.

    -

    This field is present if bit 1 of Flags is set.

    -
    bytebytebytebyte
    -
    -

    + + Version + Indexed Storage Internal Node K + This space inserted only to align + table nicely + + + + Group Internal Node K + Group Leaf Node K + + +
    -

    IV.A.2.l. The Data Storage - Filter -Pipeline Message

    +
    + + + + + - -
    +
    + + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number for this message. This document + describes version 0.

    Indexed Storage Internal Node K

    This is the node ‘K’ value for each + internal node of an indexed storage v1 B-tree. See the description + of this field in version 0 and 1 of the superblock as well as the + section on v1 B-trees.

    Group Internal Node K

    This is the node ‘K’ value for each + internal node of a group v1 B-tree. See the description of this + field in version 0 and 1 of the superblock as well as the section + on v1 B-trees.

    Group Leaf Node K

    This is the node ‘K’ value for each leaf + node of a group v1 B-tree. See the description of this field in + version 0 and 1 of the superblock as well as the section on v1 + B-trees.

    +
    + +
    +

    + IV.A.2.u. The Driver Info Message +

    + + +
    - - - - - - - -
    Header Message Name: - Data Storage - Filter Pipeline
    Header Message Type: 0x000B
    Length: Varies
    Status: Optional; may not be - repeated.
    Description:

    This message describes the filter pipeline which should - be applied to the data stream by providing filter identification - numbers, flags, a name, and client data.

    -

    This message may be present in the object headers of both - dataset and group objects. For datasets, it specifies the - filters to apply to raw data. For groups, it specifies the - filters to apply to the group’s fractal heap. Currently, - only datasets using chunked data storage use the filter - pipeline on their raw data.

    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - -
    - Filter Pipeline Message - Version 1 -
    bytebytebytebyte
    VersionNumber of FiltersReserved (zero)
    Reserved (zero)

    Filter Description List (variable size)

    -
    + + Header Message Name: Driver Info + + + Header Message Type: 0x0014 + + + Length: Varies + + + Status: Optional; may not be repeated. + -
    -
    - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number for this message. This table - describes version 1.

    Number of Filters

    The total number of filters described in this - message. The maximum possible number of filters in a - message is 32.

    Filter Description List

    A description of each filter. A filter description - appears in the next table.

    -
    + + Description: + This message contains information needed by the file driver + to reopen a file. This message is only found in the + superblock extension: see the + “Disk Format: Level 0C - Superblock Extension” section + for more information. For more information on the fields in the + driver info message, see the “Disk + Format : Level 0B - File Driver Info” section; those who use + the multi and family file drivers will find this section + particularly helpful. + + + + Format of Data: See the tables below. + + + + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Filter Description -
    bytebytebytebyte
    Filter Identification ValueName Length
    FlagsNumber Client Data Values

    Name (variable size, optional)


    Client Data (variable size, optional)

    Padding (variable size, optional)
    -
    +
    + + -
    -
    -
    Driver Info Message
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Filter Identification Value

    -

    - This value, often referred to as a filter identifier, - is designed to be a unique identifier for the filter. - Values from zero through 32,767 are reserved for filters - supported by The HDF Group in the HDF5 Library and for - filters requested and supported by third parties. - Filters supported by The HDF Group are documented immediately - below. Information on 3rd-party filters can be found at - The HDF Group’s - - Contributions page.

    - -

    - To request a filter identifier, please contact - The HDF Group’s Help Desk at - The HDF Group Help Desk. - You will be asked to provide the following information:

    -
      -
    1. Contact information for the developer requesting the - new identifier
    2. -
    3. A short description of the new filter
    4. -
    5. Links to any relevant information, including licensing - information
    6. -
    -

    - Values from 32768 to 65535 are reserved for non-distributed uses - (for example, internal company usage) or for application usage - when testing a feature. The HDF Group does not track or document - the use of the filters with identifiers from this range.

    - -

    - The filters currently in library version 1.8.0 are - listed below: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    IdentificationNameDescription
    0N/AReserved
    1deflateGZIP deflate compression
    2shuffleData element shuffling
    3fletcher32Fletcher32 checksum
    4szipSZIP compression
    5nbitN-bit packing
    6scaleoffsetScale and offset encoded values
    -

    Name Length

    Each filter has an optional null-terminated ASCII name - and this field holds the length of the name including the - null termination padded with nulls to be a multiple of - eight. If the filter has no name then a value of zero is - stored in this field.

    Flags

    The flags indicate certain properties for a filter. The - bit values defined so far are: - - - - - - - - - - - - - - - -
    BitDescription
    0If set then the filter is an optional filter. - During output, if an optional filter fails it will be - silently skipped in the pipeline.
    1-15Reserved (zero)

    -

    Number of Client Data Values

    Each filter can store integer values to control - how the filter operates. The number of entries in the - Client Data array is stored in this field.

    Name

    If the Name Length field is non-zero then it will - contain the size of this field, padded to a multiple of eight. This - field contains a null-terminated, ASCII character - string to serve as a comment/name for the filter.

    Client Data

    This is an array of four-byte integers which will be - passed to the filter function. The Client Data Number of - Values determines the number of elements in the array.

    Padding

    Four bytes of zeroes are added to the message at this - point if the Client Data Number of Values field contains - an odd number.

    -
    + + byte + byte + byte + byte + -
    -
    - - - - - - - - - - - - - - - - - - - -
    - Filter Pipeline Message - Version 2 -
    bytebytebytebyte
    VersionNumber of FiltersThis space inserted only to align table nicely

    Filter Description List (variable size)

    -
    + + Version + This space inserted + only to align table nicely + + +
    Driver Identification + -
    -
    - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number for this message. This table - describes version 2.

    Number of Filters

    The total number of filters described in this - message. The maximum possible number of filters in a - message is 32.

    Filter Description List

    A description of each filter. A filter description - appears in the next table.

    -
    + + Driver Information Size + This space inserted + only to align table nicely + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Filter Description -
    bytebytebytebyte
    Filter Identification ValueName Length (optional)
    FlagsNumber Client Data Values

    Name (variable size, optional)


    Client Data (variable size, optional)

    -
    + +
    +
    Driver Information (variable size)
    +
    +
    + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Filter Identification Value

    -

    - This value, often referred to as a filter identifier, - is designed to be a unique identifier for the filter. - Values from zero through 32,767 are reserved for filters - supported by The HDF Group in the HDF5 Library and for - filters requested and supported by third parties. - Filters supported by The HDF Group are documented immediately - below. Information on 3rd-party filters can be found at - The HDF Group’s - - Contributions page.

    - -

    - To request a filter identifier, please contact - The HDF Group’s Help Desk at - The HDF Group Help Desk. - You will be asked to provide the following information:

    -
      -
    1. Contact information for the developer requesting the - new identifier
    2. -
    3. A short description of the new filter
    4. -
    5. Links to any relevant information, including licensing - information
    6. -
    -

    - Values from 32768 to 65535 are reserved for non-distributed uses - (for example, internal company usage) or for application usage - when testing a feature. The HDF Group does not track or document - the use of the filters with identifiers from this range.

    - -

    - The filters currently in library version 1.8.0 are - listed below: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    IdentificationNameDescription
    0N/AReserved
    1deflateGZIP deflate compression
    2shuffleData element shuffling
    3fletcher32Fletcher32 checksum
    4szipSZIP compression
    5nbitN-bit packing
    6scaleoffsetScale and offset encoded values
    -

    Name Length

    Each filter has an optional null-terminated ASCII name - and this field holds the length of the name including the - null termination padded with nulls to be a multiple of - eight. If the filter has no name then a value of zero is - stored in this field.

    -

    Filters with IDs less than 256 (in other words, filters - that are defined in this format documentation) do not store - the Name Length or Name fields. -

    -

    Flags

    The flags indicate certain properties for a filter. The - bit values defined so far are: - - - - - - - - - - - - - - - -
    BitDescription
    0If set then the filter is an optional filter. - During output, if an optional filter fails it will be - silently skipped in the pipeline.
    1-15Reserved (zero)

    -

    Number of Client Data Values

    Each filter can store integer values to control - how the filter operates. The number of entries in the - Client Data array is stored in this field.

    Name

    If the Name Length field is non-zero then it will - contain the size of this field, not padded to a multiple - of eight. This field contains a non-null-terminated, - ASCII character string to serve as a comment/name for the filter. -

    -

    Filters that are defined in this format documentation - such as deflate and shuffle do not store the Name - Length or Name fields. -

    -

    Client Data

    This is an array of four-byte integers which will be - passed to the filter function. The Client Data Number of - Values determines the number of elements in the array.

    -
    -
    + +
    -

    IV.A.2.m. The Attribute Message

    +
    + + + + + - -
    +
    + + + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number for this message. This document + describes version 0.

    Driver Identification

    This is an eight-byte ASCII string without null + termination which identifies the driver.

    Driver Information Size

    + The size in bytes of the Driver Information field of this + message. +

    Driver Information

    Driver information is stored in a format defined by + the file driver.

    +
    + +
    +

    + IV.A.2.v. The Attribute Info Message +

    + + +
    - - - - - - - -
    Header Message Name: Attribute
    Header Message Type: 0x000C
    Length: Varies
    Status: Optional; may be - repeated.
    Description:

    The Attribute message is used to store objects - in the HDF5 file which are used as attributes, or - “metadata” about the current object. An attribute - is a small dataset; it has a name, a datatype, a dataspace, and - raw data. Since attributes are stored in the object header, they - should be relatively small (in other words, less than 64KB). - They can be associated with any type of object which has an - object header (groups, datasets, or committed (named) - datatypes).

    -

    In 1.8.x versions of the library, attributes can be larger - than 64KB. See the - - “Special Issues” section of the Attributes chapter - in the HDF5 User’s Guide for more information.

    -

    Note: Attributes on an object must have unique names: - the HDF5 Library currently enforces this by causing the - creation of an attribute with a duplicate name to fail. - Attributes on different objects may have the same name, - however.

    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Attribute Message (Version 1) -
    bytebytebytebyte
    VersionReserved (zero)Name Size
    Datatype SizeDataspace Size

    Name (variable size)


    Datatype (variable size)


    Dataspace (variable size)


    Data (variable size)

    -
    + + Header Message Name: Attribute Info + + + Header Message Type: 0x0015 + + + Length: Varies + + + Status: Optional; may not be repeated. + + + Description: + This message stores information about the attributes on an + object, such as the maximum creation index for the attributes + created and the location of the attribute storage when the + attributes are stored “densely”. + + + Format of Data: See the tables below. + + + + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number information is used for changes in the format of the - attribute message and is described here: - - - - - - - - - - - - - - - -
    VersionDescription
    0Never used.
    1Used by the library before version 1.6 to encode attribute message. - This version does not support shared datatypes.

    -

    Name Size

    The length of the attribute name in bytes including the - null terminator. Note that the Name field below may - contain additional padding not represented by this - field.

    Datatype Size

    The length of the datatype description in the Datatype - field below. Note that the Datatype field may contain - additional padding not represented by this field.

    Dataspace Size

    The length of the dataspace description in the Dataspace - field below. Note that the Dataspace field may contain - additional padding not represented by this field.

    Name

    The null-terminated attribute name. This field is - padded with additional null characters to make it a - multiple of eight bytes.

    Datatype

    The datatype description follows the same format as - described for the datatype object header message. This - field is padded with additional zero bytes to make it a - multiple of eight bytes.

    Dataspace

    The dataspace description follows the same format as - described for the dataspace object header message. This - field is padded with additional zero bytes to make it a - multiple of eight bytes.

    Data

    The raw data for the attribute. The size is determined - from the datatype and dataspace descriptions. This - field is not padded with additional bytes.

    -
    +
    + + -
    -
    -
    Attribute Info Message
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Attribute Message (Version 2) -
    bytebytebytebyte
    VersionFlagsName Size
    Datatype SizeDataspace Size

    Name (variable size)


    Datatype (variable size)


    Dataspace (variable size)


    Data (variable size)

    -
    + + byte + byte + byte + byte + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number information is used for changes in the - format of the attribute message and is described here: - - - - - - - - - - -
    VersionDescription
    2Used by the library of version 1.6.x and after to encode - attribute messages. - This version supports shared datatypes. The fields of - name, datatype, and dataspace are not padded with - additional bytes of zero. -

    -

    Flags

    This bit field contains extra information about - interpreting the attribute message: - - - - - - - - - - - - - - - - -
    BitDescription
    0If set, datatype is shared.
    1If set, dataspace is shared.

    -

    Name Size

    The length of the attribute name in bytes including the - null terminator.

    Datatype Size

    The length of the datatype description in the Datatype - field below.

    Dataspace Size

    The length of the dataspace description in the Dataspace - field below.

    Name

    The null-terminated attribute name. This field is not - padded with additional bytes.

    Datatype

    The datatype description follows the same format as - described for the datatype object header message. -

    -

    If the - Flag field indicates this attribute’s datatype is - shared, this field will contain a “shared message” encoding - instead of the datatype encoding. -

    -

    This field is not padded with additional bytes. -

    -

    Dataspace

    The dataspace description follows the same format as - described for the dataspace object header message. -

    -

    If the - Flag field indicates this attribute’s dataspace is - shared, this field will contain a “shared message” encoding - instead of the dataspace encoding. -

    -

    This field is not padded with additional bytes.

    -

    Data

    The raw data for the attribute. The size is determined - from the datatype and dataspace descriptions. -

    -

    This field is not padded with additional zero bytes. -

    -
    -
    + + Version + Flags + Maximum Creation Index (optional) + + +
    Fractal Heap AddressO
    +
    + + +
    Attribute Name v2 B-tree AddressO
    +
    + + +
    Attribute Creation Order v2 B-tree + AddressO (optional)
    +
    + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Attribute Message (Version 3) -
    bytebytebytebyte
    VersionFlagsName Size
    Datatype SizeDataspace Size
    Name Character Set EncodingThis space inserted only to align table nicely

    Name (variable size)


    Datatype (variable size)


    Dataspace (variable size)


    Data (variable size)

    -
    + -
    -
    - - - - - - - - - + + + + + + + + + + + + + + + + +
    Field NameDescription

    Version

    The version number information is used for changes in the - format of the attribute message and is described here: + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
    + + + +
    +

    + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number for this message. This document + describes version 0.

    Flags

    This is the attribute index information flag with the + following definition:

    - - - - - - - - - -
    VersionDescription
    3Used by the library of version 1.8.x and after to - encode attribute messages. - This version supports attributes with non-ASCII names. -

    -

    Flags

    This bit field contains extra information about - interpreting the attribute message: - - - - - - - - - - - - - - - - -
    BitDescription
    0If set, datatype is shared.
    1If set, dataspace is shared.

    -

    Name Size

    The length of the attribute name in bytes including the - null terminator.

    Datatype Size

    The length of the datatype description in the Datatype - field below.

    Dataspace Size

    The length of the dataspace description in the Dataspace - field below.

    Name Character Set Encoding

    The character set encoding for the attribute’s name: - - - - - - - - - - - - - - - -
    ValueDescription
    0ASCII character set encoding -
    1UTF-8 character set encoding -
    -

    -

    Name

    The null-terminated attribute name. This field is not - padded with additional bytes.

    Datatype

    The datatype description follows the same format as - described for the datatype object header message. -

    -

    If the - Flag field indicates this attribute’s datatype is - shared, this field will contain a “shared message” encoding - instead of the datatype encoding. -

    -

    This field is not padded with additional bytes. -

    -

    Dataspace

    The dataspace description follows the same format as - described for the dataspace object header message. -

    -

    If the - Flag field indicates this attribute’s dataspace is - shared, this field will contain a “shared message” encoding - instead of the dataspace encoding. -

    -

    This field is not padded with additional bytes.

    -

    Data

    The raw data for the attribute. The size is determined - from the datatype and dataspace descriptions. -

    -

    This field is not padded with additional zero bytes. -

    -
    -
    +
    BitDescription
    0If set, creation order for attributes is tracked.
    1If set, creation order for attributes is indexed.
    2-7Reserved
    +

    + + + +

    Maximum Creation Index

    +

    This is the maximum creation order index value for the + attributes on the object.

    +

    + This field is present if bit 0 of Flags is set. +

    + + + +

    Fractal Heap Address

    +

    This is the address of the fractal heap to store + dense attributes.

    + -
    -

    IV.A.2.n. The Object Comment -Message

    + +

    Attribute Name v2 B-tree Address

    +

    This is the address of the version 2 B-tree to index + the names of densely stored attributes.

    + - -
    - - - - - - - - -
    Header Message Name: Object - Comment
    Header Message Type: 0x000D
    Length: Varies
    Status: Optional; may not be - repeated.
    Description:The object comment is designed to be a short description of - an object. An object comment is a sequence of non-zero - (\0) ASCII characters with no other formatting - included by the library.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - -
    - Name Message -
    bytebytebytebyte

    Comment (variable size)

    -
    + +

    Attribute Creation Order v2 B-tree Address

    +

    This is the address of the version 2 B-tree to index + the creation order of densely stored attributes.

    +

    + This field is present if bit 1 of Flags is set. +

    + -
    -
    - - - - - - - - - - -
    Field NameDescription

    Name

    A null terminated ASCII character string.

    -
    + +

    -

    IV.A.2.o. The Object -Modification Time (Old) Message

    +

    + IV.A.2.w. The Object Reference Count + Message +

    - -
    + +
    - - - - - - - -
    Header Message Name: Object - Modification Time (Old)
    Header Message Type: 0x000E
    Length: Fixed
    Status: Optional; may not be - repeated.
    Description:

    The object modification date and time is a timestamp - which indicates (using ISO-8601 date and time format) the last - modification of an object. The time is updated when any object - header message changes according to the system clock where the - change was posted. All fields of this message should be - interpreted as coordinated universal time (UTC).

    -

    This modification time message is deprecated in favor of - the “new” Object - Modification Time message and is no longer written to the - file in versions of the HDF5 Library after the 1.6.0 - version.

    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Modification Time Message -
    bytebytebytebyte
    Year
    MonthDay of Month
    HourMinute
    SecondReserved
    -
    + + Header Message Name: Object Reference + Count + + + Header Message Type: 0x0016 + + + Length: Fixed + + + Status: Optional; may not be repeated. + + + Description: + This message stores the number of hard links (in groups or + objects) pointing to an object: in other words, its reference + count. + + + + Format of Data: See the tables below. + + +
    + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Year

    The four-digit year as an ASCII string. For example, - 1998. -

    Month

    The month number as a two digit ASCII string where - January is 01 and December is 12.

    Day of Month

    The day number within the month as a two digit ASCII - string. The first day of the month is 01.

    Hour

    The hour of the day as a two digit ASCII string where - midnight is 00 and 11:00pm is 23.

    Minute

    The minute of the hour as a two digit ASCII string where - the first minute of the hour is 00 and - the last is 59.

    Second

    The second of the minute as a two digit ASCII string - where the first second of the minute is 00 - and the last is 59.

    Reserved

    This field is reserved and should always be zero.

    -
    +
    + + -
    -

    IV.A.2.p. The Shared Message Table -Message

    + + + + + + - -
    -
    Object Reference Count
    bytebytebytebyte
    - - - - - - - -
    Header Message Name: Shared Message - Table
    Header Message Type: 0x000F
    Length: Fixed
    Status: Optional; may not be - repeated.
    Description:This message is used to locate the table of shared object - header message (SOHM) indexes. Each index consists of information - to find the shared messages from either the heap or object header. - This message is only found in the superblock - extension.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - + + + + -
    - Shared Message Table Message -
    bytebytebytebyte
    VersionThis space inserted only to align table nicely

    Shared Object Header Message Table AddressO

    Number of IndicesThis space inserted only to align table nicely
    VersionThis space inserted + only to align table nicely
    + + Reference count + + +
    - +
    +
    +
    - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    + Field Name + Description + -
    + +

    Version

    +

    The version number for this message. This document + describes version 0.

    + -
    -
    - - - - - - - - - - - - - - - - - - - - + + + + -
    Field NameDescription

    Version

    The version number for this message. This document describes version 0.

    Shared Object Header Message Table Address

    This field is the address of the master table for shared - object header message indexes.

    -

    Number of Indices

    This field is the number of indices in the master table. -

    Reference Count

    The unsigned 32-bit integer is the reference count + for the object. This message is only present in “version + 2” (or later) object headers, and if not present in those object + header versions, the reference count for the object is assumed to + be 1.

    -
    + +
    -

    IV.A.2.q. The Object Header -Continuation Message

    +

    + IV.A.2.x. The File Space Info Message +

    - -
    + +
    - - - - - - - -
    Header Message Name: Object Header - Continuation
    Header Message Type: 0x0010
    Length: Fixed
    Status: Optional; may be - repeated.
    Description:The object header continuation is the location in the file - of a block containing more header messages for the current data - object. This can be used when header blocks become too large or - are likely to change over time.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - + - - + - - + -
    - Object Header Continuation Message -
    bytebytebytebyteHeader Message Name: File Space Info

    OffsetO

    Header Message Type: 0x0018

    LengthL

    Length: Fixed
    - - - - + + - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    Status: Optional; may not be repeated.
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    + Description: + This message stores the file space management strategy (see + description below) that the library uses in handling file space + request for the file. It also contains the free-space section + threshold used by the library’s free-space managers for the + file. If the strategy is 1, this message also contains the addresses + of the file’s free-space managers which track free space for + each type of file space allocation. There are six basic types of + file space allocation: superblock, B-tree, raw data, global heap, + local heap, and object header. See the description of Free-space Manager as well as the + description of allocation types in Appendix + B. + + + + Format of Data: See the tables below. + + +
    + - +
    + + -
    -
    -
    File Space Info
    - - + + + + - - + + + - - - + -
    Field NameDescriptionbytebytebytebyte

    Offset

    This value is the address in the file where the - header continuation block is located.

    VersionStrategyThresholdL

    Length

    This value is the length in bytes of the header continuation - block in the file.

    Super-block Free-space Manager AddressO
    -
    -
    - -

    The format of the header continuation block that this message points - to depends on the version of the object header that the message is - contained within. -

    - -

    - Continuation blocks for version 1 object headers have no special - formatting information; they are merely a list of object header - message info sequences (type, size, flags, reserved bytes and data - for each message sequence). See the description - of Version 1 Data Object Header Prefix. -

    - -

    Continuation blocks for version 2 object headers do have - special formatting information as described here - (see also the description of - Version 2 Data Object Header Prefix.): -

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Version 2 Object Header Continuation Block -
    bytebytebytebyte
    Signature
    Header Message Type #1Size of Header Message Data #1Header Message #1 Flags
    Header Message #1 Creation Order (optional)This space inserted only to align table nicely

    Header Message Data #1

    .
    .
    .
    Header Message Type #nSize of Header Message Data #nHeader Message #n Flags
    Header Message #n Creation Order (optional)This space inserted only to align table nicely

    Header Message Data #n

    Gap (optional, variable size)
    Checksum
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + -
    Field NameDescription

    Signature

    -

    The ASCII character string “OCHK” - is used to indicate the - beginning of an object header continuation block. This gives file - consistency checking utilities a better chance of reconstructing a - damaged file. -

    -

    Header Message #n Type

    -

    Same format as version 1 of the object header, described above. -

    Size of Header Message #n Data

    -

    Same format as version 1 of the object header, described above. -

    Header Message #n Flags

    -

    Same format as version 1 of the object header, described above. -

    Header Message #n Creation Order

    -

    This field stores the order that a message of a given type - was created in.

    -

    This field is present if bit 2 of flags is set.

    -

    Header Message #n Data

    -

    Same format as version 1 of the object header, described above. -

    Gap

    -

    A gap in an object header chunk is inferred by the end of the - messages for the chunk before the beginning of the chunk’s - checksum. Gaps are always smaller than the size of an - object header message prefix (message type + message size + - message flags).

    -

    Gaps are formed when a message (typically an attribute message) - in an earlier chunk is deleted and a message from a later - chunk that does not quite fit into the free space is moved - into the earlier chunk.

    -

    Checksum

    -

    This is the checksum for the object header chunk. -

    -
    B-tree Free-space Manager AddressO
    -
    + + Raw Data Free-space Manager AddressO + + + Global Heap Free-space Manager AddressO + + + Local Heap Free-space Manager AddressO + + + Object Header Free-space Manager AddressO + + -
    -

    IV.A.2.r. The Symbol Table -Message

    + + + + + + + + + +
     (Items marked with an ‘O’ in the + above table are of the size specified in “Size of + Offsets” field in the superblock.)
     (Items marked with an ‘L’ in the above table are + of the size specified in “Size of Lengths” field in the + superblock.)
    - -
    - - - - - - - - -
    Header Message Name: Symbol Table - Message
    Header Message Type: 0x0011
    Length: Fixed
    Status: Required for - “old style” groups; may not be repeated.
    Description:Each “old style” group has a v1 B-tree and a - local heap for storing symbol table entries, which are located - with this message.
    Format of data: See the tables - below.
    - - -
    - - + +
    +
    +
    - Symbol Table Message -
    - - - - + + - + + - + + -
    bytebytebytebyteField NameDescription

    v1 B-tree AddressO

    Version

    This is the version number of this message. This + document describes version 0.


    Local Heap AddressO

    Strategy

    This is the file space management strategy for the + file. There are four types of strategies:

    + + + + + + + + + + + + + + + + + + + + + + + + +
    ValueDescription
    1With this strategy, the HDF5 Library’s free-space + managers track the free space that results from the manipulation + of HDF5 objects in the HDF5 file. The free space information is + saved when the file is closed, and reloaded when the file is + reopened.
    When space is needed for file metadata or raw + data, the HDF5 Library first requests space from the + library’s free-space managers. If the request is not + satisfied, the library requests space from the aggregators. If + the request is still not satisfied, the library requests space + from the virtual file driver. That is, the library will use all + of the mechanisms for allocating space. +
    2This is the HDF5 Library’s default file space + management strategy. With this strategy, the library’s + free-space managers track the free space that results from the + manipulation of HDF5 objects in the HDF5 file. The free space + information is NOT saved when the file is closed and the free + space that exists upon file closing becomes unaccounted space in + the file.
    As with strategy #1, the library will try all + of the mechanisms for allocating space. When space is needed for + file metadata or raw data, the library first requests space from + the free-space managers. If the request is not satisfied, the + library requests space from the aggregators. If the request is + still not satisfied, the library requests space from the virtual + file driver. +
    3With this strategy, the HDF5 Library does not track free + space that results from the manipulation of HDF5 objects in the + HDF5 file and the free space becomes unaccounted space in the + file.
    When space is needed for file metadata or raw data, + the library first requests space from the aggregators. If the + request is not satisfied, the library requests space from the + virtual file driver. +
    4With this strategy, the HDF5 Library does not track free + space that results from the manipulation of HDF5 objects in the + HDF5 file and the free space becomes unaccounted space in the + file.
    When space is needed for file metadata or raw data, + the library requests space from the virtual file driver. +
    +

    - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    +

    Threshold

    +

    + This is the free-space section threshold. The library’s + free-space managers will track only free-space sections with size + greater than or equal to threshold. The default is to + track free-space sections of all sizes. +

    + + +

    Superblock Free-space Manager Address

    +

    This is the address of the free-space manager for + H5FD_MEM_SUPER allocation type.

    + -
    + +

    B-tree Free-space Manager Address

    +

    This is the address of the free-space manager for + H5FD_MEM_BTREE allocation type.

    + -
    -
    - - - + + - - + + - - + + -
    Field NameDescription

    Raw Data Free-space Manager Address

    This is the address of the free-space manager for + H5FD_MEM_DRAW allocation type.

    v1 B-tree Address

    This value is the address of the v1 B-tree containing the - symbol table entries for the group.

    Global Heap Free-space Manager Address

    This is the address of the free-space manager for + H5FD_MEM_GHEAP allocation type.

    Local Heap Address

    This value is the address of the local heap containing - the link names for the symbol table entries for the group.

    Local Heap Free-space Manager Address

    This is the address of the free-space manager for + H5FD_MEM_LHEAP allocation type.

    -
    + +

    Object Header Free-space Manager Address

    +

    This is the address of the free-space manager for + H5FD_MEM_OHDR allocation type.

    + + +
    -

    IV.A.2.s. The Object -Modification Time Message

    - - -
    - - - - - - - - -
    Header Message Name: Object - Modification Time
    Header Message Type: 0x0012
    Length: Fixed
    Status: Optional; may not be - repeated.
    Description:The object modification time is a timestamp which indicates - the time of the last modification of an object. The time is - updated when any object header message changes according to - the system clock where the change was posted.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - -
    - Modification Time Message -
    bytebytebytebyte
    VersionReserved (zero)
    Seconds After UNIX Epoch
    -
    -
    -
    - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number is used for changes in the format of Object Modification Time - and is described here: - - - - - - - - - - - - - - - -
    VersionDescription
    0Never used.
    1Used by Version 1.6.1 and after of the library to encode time. In - this version, the time is the seconds after Epoch.

    -

    Seconds After UNIX Epoch

    A 32-bit unsigned integer value that stores the number of - seconds since 0 hours, 0 minutes, 0 seconds, January 1, 1970, - Coordinated Universal Time.

    -

    -

    IV.A.2.t. The B-tree -‘K’ Values Message

    - - -
    - - - - - - - - -
    Header Message Name: B-tree - ‘K’ Values
    Header Message Type: 0x0013
    Length: Fixed
    Status: Optional; may not be - repeated.
    Description:This message retrieves non-default ‘K’ values - for internal and leaf nodes of a group or indexed storage v1 - B-trees. This message is only found in the superblock - extension.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - -
    - B-tree ‘K’ Values Message -
    bytebytebytebyte
    VersionIndexed Storage Internal Node KThis space inserted only to align table nicely
    Group Internal Node KGroup Leaf Node K
    -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - +

    + IV.B. Disk Format: Level 2B - Data Object + Data Storage +

    -
    Field NameDescription

    Version

    The version number for this message. This document describes - version 0.

    -

    Indexed Storage Internal Node K

    This is the node ‘K’ value for each internal node of an - indexed storage v1 B-tree. See the description of this field - in version 0 and 1 of the superblock as well as the section on - v1 B-trees. -

    -

    Group Internal Node K

    This is the node ‘K’ value for each internal node of a group - v1 B-tree. See the description of this field in version 0 and - 1 of the superblock as well as the section on v1 B-trees. -

    -

    Group Leaf Node K

    This is the node ‘K’ value for each leaf node of a group v1 - B-tree. See the description of this field in version 0 and 1 - of the superblock as well as the section on v1 B-trees. -

    -
    -
    +

    The data for an object is stored separately from its header + information in the file and may not actually be located in the HDF5 + file itself if the header indicates that the data is stored externally. + The information for each record in the object is stored according to + the dimensionality of the object (indicated in the dataspace header + message). Multi-dimensional array data is stored in C order; in other + words, the “last” dimension changes fastest.

    + +

    Data whose elements are composed of atomic datatypes are stored + in IEEE format, unless they are specifically defined as being stored in + a different machine format with the architecture-type information from + the datatype header message. This means that each architecture will + need to [potentially] byte-swap data values into the internal + representation for that particular machine.

    + +

    Data with a variable-length datatype is stored in the global heap + of the HDF5 file. Global heap identifiers are stored in the data object + storage.

    + +

    Data whose elements are composed of reference datatypes are + stored in several different ways depending on the particular reference + type involved. Object pointers are just stored as the offset of the + object header being pointed to with the size of the pointer being the + same number of bytes as offsets in the file.

    -
    -

    IV.A.2.u. The Driver Info -Message

    +

    Dataset region references are stored as a heap-ID which points to + the following information within the file-heap: an offset of the object + pointed to, number-type information (same format as header message), + dimensionality information (same format as header message), sub-set + start and end information (in other words, a coordinate location for + each), and field start and end names (in other words, a [pointer to + the] string indicating the first field included and a [pointer to the] + string name for the last field).

    - -
    - - - - - - - - - -
    Header Message Name: Driver - Info
    Header Message Type: 0x0014
    Length: Varies
    Status: Optional; may not be - repeated.
    - Description:This message contains information needed by the file driver - to reopen a file. This message is only found in the - superblock extension: see the - “Disk Format: Level 0C - Superblock Extension” - section for more information. For more information on the fields - in the driver info message, see the - “Disk Format : Level 0B - File Driver Info” - section; those who use the multi and family file drivers will - find this section particularly helpful.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - +

    Data of a compound datatype is stored as a contiguous stream of + the items in the structure, with each item formatted according to its + datatype.

    -
    - Driver Info Message -
    bytebytebytebyte
    VersionThis space inserted only to align table nicely

    Driver Identification
    Driver Information SizeThis space inserted only to align table nicely


    Driver Information (variable size)


    -
    -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number for this message. This document describes - version 0.

    -

    Driver Identification

    This is an eight-byte ASCII string without null termination which - identifies the driver. -

    -

    Driver Information Size

    The size in bytes of the Driver Information field of this - message.

    -

    Driver Information

    Driver information is stored in a format defined by the file driver.

    -
    -

    -

    IV.A.2.v. The Attribute Info -Message

    - - -
    - - - - - - - - -
    Header Message Name: Attribute - Info
    Header Message Type: 0x0015
    Length: Varies
    Status: Optional; may not be - repeated.
    Description:This message stores information about the attributes on an - object, such as the maximum creation index for the attributes - created and the location of the attribute storage when the - attributes are stored “densely”.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - +
    +
    +

    + V. Appendix A: Definitions +

    -
    - Attribute Info Message -
    bytebytebytebyte
    VersionFlagsMaximum Creation Index (optional)

    Fractal Heap AddressO


    Attribute Name v2 B-tree AddressO


    Attribute Creation Order v2 B-tree AddressO (optional)

    +

    Definitions of various terms used in this document are included + in this section.

    - +
    +
    - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
    - -
    - -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + -
    Field NameDescription

    Version

    The version number for this message. This document describes - version 0.

    -

    Flags

    This is the attribute index information flag with the - following definition: - - - - - - - - - - - - - - - - - - - -
    BitDescription
    0If set, creation order for attributes is tracked. -
    1If set, creation order for attributes is indexed. -
    2-7Reserved

    - -

    Maximum Creation Index

    This is the maximum creation order index value for the - attributes on the object.

    -

    This field is present if bit 0 of Flags is set.

    -

    Fractal Heap Address

    This is the address of the fractal heap to store dense - attributes.

    -

    Attribute Name v2 B-tree Address

    This is the address of the version 2 B-tree to index the - names of densely stored attributes.

    -

    Attribute Creation Order v2 B-tree Address

    This is the address of the version 2 B-tree to index the - creation order of densely stored attributes.

    -

    This field is present if bit 1 of Flags is set.

    -
    TermDefinition
    -
    + + Undefined Address + The undefined address for a + file is a file address with all bits set: in other words, 0xffff...ff. + + -
    -

    IV.A.2.w. The Object Reference -Count Message

    + + Unlimited Size + The unlimited size for a size is + a value with all bits set: in other words, 0xffff...ff. + + - -
    - - - - - - - - -
    Header Message Name: Object Reference - Count
    Header Message Type: 0x0016
    Length: Fixed
    Status: Optional; may not be - repeated.
    Description:This message stores the number of hard links (in groups or - objects) pointing to an object: in other words, its - reference count.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - -
    - Object Reference Count -
    bytebytebytebyte
    VersionThis space inserted only to align table nicely
    Reference count
    -
    + + -
    -
    - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    The version number for this message. This document describes - version 0.

    -

    Reference Count

    The unsigned 32-bit integer is the reference count for the - object. This message is only present in “version 2” - (or later) object headers, and if not present in those object - header versions, the reference count for the object is assumed - to be 1.

    -
    -

    -

    IV.A.2.x. The File Space Info -Message

    - - -
    - - - - - - - - -
    Header Message Name: File Space - Info
    Header Message Type: 0x0018
    Length: Fixed
    Status: Optional; may not be - repeated.
    - Description:This message stores the file space management strategy (see - description below) that the library uses in handling file space - request for the file. It also contains the free-space section - threshold used by the library’s free-space managers for - the file. If the strategy is 1, this message also contains the - addresses of the file’s free-space managers which track - free space for each type of file space allocation. There are - six basic types of file space allocation: superblock, B-tree, - raw data, global heap, local heap, and object header. See the - description of Free-space - Manager as well the description of allocation types in - Appendix B.
    Format of Data: See the tables - below.
    - - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - File Space Info -
    bytebytebytebyte
    VersionStrategyThresholdL
    Super-block Free-space Manager AddressO
    B-tree Free-space Manager AddressO
    Raw Data Free-space Manager AddressO
    Global Heap Free-space Manager AddressO
    Local Heap Free-space Manager AddressO
    Object Header Free-space Manager AddressO
    +
    +
    +

    + VI. Appendix B: File Memory Allocation Types +

    - +

    There are six basic types of file memory allocation as follows:

    +
    +
    - - - - - -
      - (Items marked with an ‘O’ in the above table are of the size - specified in “Size of Offsets” field in the superblock.) -
      - (Items marked with an ‘L’ in the above table are of the size - specified in “Size of Lengths” field in the superblock.) -
    - -
    + Basic Allocation Type + Description + -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription

    Version

    This is the version number of this message. This document describes - version 0.

    -

    Strategy

    This is the file space management strategy for the file. - There are four types of strategies: - - - - - - - - - - - - - - - - - - - - - - - - -
    ValueDescription
    1With this strategy, the HDF5 Library’s free-space managers track the - free space that results from the manipulation of HDF5 objects - in the HDF5 file. The free space information is saved when the - file is closed, and reloaded when the file is reopened. -
    - When space is needed for file metadata or raw data, - the HDF5 Library first requests space from the library’s free-space - managers. If the request is not satisfied, the library requests space - from the aggregators. If the request is still not satisfied, - the library requests space from the virtual file driver. - That is, the library will use all of the mechanisms for allocating - space. -
    2This is the HDF5 Library’s default file space management strategy. - With this strategy, the library’s free-space managers track the free space - that results from the manipulation of HDF5 objects in the HDF5 file. - The free space information is NOT saved when the file is closed and - the free space that exists upon file closing becomes unaccounted - space in the file. -
    - As with strategy #1, the library will try all of the mechanisms - for allocating space. When space is needed for file metadata or - raw data, the library first requests space from the free-space - managers. If the request is not satisfied, the library requests - space from the aggregators. If the request is still not satisfied, - the library requests space from the virtual file driver. -
    3With this strategy, the HDF5 Library does not track free space that results - from the manipulation of HDF5 objects in the HDF5 file and - the free space becomes unaccounted space in the file. -
    - When space is needed for file metadata or raw data, - the library first requests space from the aggregators. - If the request is not satisfied, the library requests space from - the virtual file driver. -
    4With this strategy, the HDF5 Library does not track free space that results - from the manipulation of HDF5 objects in the HDF5 file and - the free space becomes unaccounted space in the file. -
    - When space is needed for file metadata or raw data, - the library requests space from the virtual file driver. -

    -

    Threshold

    This is the free-space section threshold. - The library’s free-space managers will track only - free-space sections with size greater than or equal to - threshold. The default is to track free-space - sections of all sizes.

    -

    Superblock Free-space Manager Address

    This is the address of the free-space manager for - H5FD_MEM_SUPER allocation type. -

    -

    B-tree Free-space Manager Address

    This is the address of the free-space manager for - H5FD_MEM_BTREE allocation type. -

    -

    Raw Data Free-space Manager Address

    This is the address of the free-space manager for - H5FD_MEM_DRAW allocation type. -

    -

    Global Heap Free-space Manager Address

    This is the address of the free-space manager for - H5FD_MEM_GHEAP allocation type. -

    -

    Local Heap Free-space Manager Address

    This is the address of the free-space manager for - H5FD_MEM_LHEAP allocation type. -

    -

    Object Header Free-space Manager Address

    This is the address of the free-space manager for - H5FD_MEM_OHDR allocation type. -

    -
    -
    -
    + + H5FD_MEM_SUPER + File memory allocated for Superblock. + + + H5FD_MEM_BTREE + File memory allocated for B-tree. + -
    -

    -IV.B. Disk Format: Level 2B - Data Object Data Storage

    + + H5FD_MEM_DRAW + File memory allocated for raw data. + -

    The data for an object is stored separately from its header - information in the file and may not actually be located in the HDF5 file - itself if the header indicates that the data is stored externally. The - information for each record in the object is stored according to the - dimensionality of the object (indicated in the dataspace header message). - Multi-dimensional array data is stored in C order; in other words, the - “last” dimension changes fastest.

    - -

    Data whose elements are composed of atomic datatypes are stored in IEEE - format, unless they are specifically defined as being stored in a different - machine format with the architecture-type information from the datatype - header message. This means that each architecture will need to [potentially] - byte-swap data values into the internal representation for that particular - machine.

    - -

    Data with a variable-length datatype is stored in the global heap - of the HDF5 file. Global heap identifiers are stored in the - data object storage.

    - -

    Data whose elements are composed of reference datatypes are stored in - several different ways depending on the particular reference type involved. - Object pointers are just stored as the offset of the object header being - pointed to with the size of the pointer being the same number of bytes as - offsets in the file.

    + + H5FD_MEM_GHEAP + File memory allocated for Global Heap. + -

    Dataset region references are stored as a heap-ID which points to -the following information within the file-heap: an offset of the object -pointed to, number-type information (same format as header message), -dimensionality information (same format as header message), sub-set start -and end information (in other words, a coordinate location for each), -and field start and end names (in other words, a [pointer to the] string -indicating the first field included and a [pointer to the] string name -for the last field).

    + + H5FD_MEM_LHEAP + File memory allocated for Local Heap. + -

    Data of a compound datatype is stored as a contiguous stream of the items - in the structure, with each item formatted according to its datatype.

    + + H5FD_MEM_OHDR + File memory allocated for Object Header. + + + +

    There are other file memory allocation types that are mapped to + the above six basic allocation types because they are similar in + nature. The mapping is listed in the following table:

    +
    + + + + + -
    -
    -
    -

    -V. Appendix A: Definitions

    + + + + -

    Definitions of various terms used in this document are included in -this section.

    + + + + -
    -
    Basic Allocation TypeMapping of Allocation Types to Basic Allocation Types
    H5FD_MEM_SUPERnone
    H5FD_MEM_BTREEH5FD_MEM_SOHM_INDEX
    - - - - + + + + - - - - + + + + - - - - + + + + + + + +
    TermDefinition
    H5FD_MEM_DRAWH5FD_MEM_FHEAP_HUGE_OBJ
    Undefined AddressThe undefined - address for a file is a file address with all bits - set: in other words, 0xffff...ff.
    H5FD_MEM_GHEAPnone
    Unlimited SizeThe unlimited size - for a size is a value with all bits set: in other words, - 0xffff...ff.
    H5FD_MEM_LHEAPH5FD_MEM_FHEAP_DBLOCK, H5FD_MEM_FSPACE_SINFO
    H5FD_MEM_OHDRH5FD_MEM_FHEAP_HDR, H5FD_MEM_FHEAP_IBLOCK, + H5FD_MEM_FSPACE_HDR, H5FD_MEM_SOHM_TABLE
    -
    + +

    Allocation types that are mapped to basic allocation types are + described below:

    +
    + + + + + -
    -
    -
    -

    -VI. Appendix B: File Memory Allocation Types

    + + + + -

    There are six basic types of file memory allocation as follows: -

    -
    -
    Allocation TypeDescription
    H5FD_MEM_FHEAP_HDRFile memory allocated for Fractal Heap Header.
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Basic Allocation TypeDescription
    H5FD_MEM_SUPERFile memory allocated for Superblock.
    H5FD_MEM_BTREEFile memory allocated for B-tree.
    H5FD_MEM_DRAWFile memory allocated for raw data.
    H5FD_MEM_GHEAPFile memory allocated for Global Heap.
    H5FD_MEM_LHEAPFile memory allocated for Local Heap.
    H5FD_MEM_OHDRFile memory allocated for Object Header.
    -
    + + H5FD_MEM_FHEAP_DBLOCK + File memory allocated for Fractal Heap Direct + Blocks. + -

    There are other file memory allocation types that are mapped to the -above six basic allocation types because they are similar in nature. -The mapping is listed in the following table: -

    + + H5FD_MEM_FHEAP_IBLOCK + File memory allocated for Fractal Heap Indirect + Blocks. + -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Basic Allocation TypeMapping of Allocation Types to Basic Allocation Types
    H5FD_MEM_SUPERnone
    H5FD_MEM_BTREEH5FD_MEM_SOHM_INDEX
    H5FD_MEM_DRAWH5FD_MEM_FHEAP_HUGE_OBJ
    H5FD_MEM_GHEAPnone
    H5FD_MEM_LHEAPH5FD_MEM_FHEAP_DBLOCK, H5FD_MEM_FSPACE_SINFO
    H5FD_MEM_OHDRH5FD_MEM_FHEAP_HDR, H5FD_MEM_FHEAP_IBLOCK, H5FD_MEM_FSPACE_HDR, H5FD_MEM_SOHM_TABLE
    -
    + + H5FD_MEM_FHEAP_HUGE_OBJ + File memory allocated for huge objects in the fractal heap. + -

    Allocation types that are mapped to basic allocation types are described below: -

    + + H5FD_MEM_FSPACE_HDR + File memory allocated for Free-space Manager + Header. + -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Allocation TypeDescription
    H5FD_MEM_FHEAP_HDRFile memory allocated for Fractal Heap Header.
    H5FD_MEM_FHEAP_DBLOCKFile memory allocated for Fractal Heap Direct Blocks.
    H5FD_MEM_FHEAP_IBLOCKFile memory allocated for Fractal Heap Indirect Blocks.
    H5FD_MEM_FHEAP_HUGE_OBJFile memory allocated for huge objects in the fractal heap.
    H5FD_MEM_FSPACE_HDRFile memory allocated for Free-space Manager Header.
    H5FD_MEM_FSPACE_SINFOFile memory allocated for Free-space Section List of the free-space manager.
    H5FD_MEM_SOHM_TABLEFile memory allocated for Shared Object Header Message Table.
    H5FD_MEM_SOHM_INDEXFile memory allocated for Shared Message Record List.
    -
    - + + H5FD_MEM_FSPACE_SINFO + File memory allocated for Free-space Section List + of the free-space manager. + + + + H5FD_MEM_SOHM_TABLE + File memory allocated for Shared Object Header + Message Table. + + + H5FD_MEM_SOHM_INDEX + File memory allocated for Shared Message Record + List. + + + + + diff --git a/doxygen/examples/H5.format.html b/doxygen/examples/H5.format.html index 47e19bf..9134d35 100644 --- a/doxygen/examples/H5.format.html +++ b/doxygen/examples/H5.format.html @@ -418,7 +418,7 @@

    This document describes the lower-level data objects; the higher-level objects and their properties are described - in the HDF5 User’s Guide.

    + in the HDF5 User Guide.

    Three levels of information comprise the file format. Level 0 contains basic information for identifying and diff --git a/doxygen/examples/ThreadSafeLibrary.html b/doxygen/examples/ThreadSafeLibrary.html index 97f7742..5824dc6 100644 --- a/doxygen/examples/ThreadSafeLibrary.html +++ b/doxygen/examples/ThreadSafeLibrary.html @@ -20,9 +20,9 @@ The following code is placed at the beginning of H5private.h:

    -H5_HAVE_THREADSAFE is defined when the HDF-5 library is +H5_HAVE_THREADSAFE is defined when the HDF5 library is compiled with the --enable-threadsafe configuration option. In general, -code for the non-threadsafe version of HDF-5 library are placed within +code for the non-threadsafe version of HDF5 library are placed within the #else part of the conditional compilation. The exception to this rule are the changes to the FUNC_ENTER (in H5private.h), HRETURN and HRETURN_ERROR (in @@ -438,7 +438,7 @@ described in Appendix D and may be found in H5TS.c.

    Except where stated, all tests involve 16 simultaneous threads that make -use of HDF-5 API calls without any explicit synchronization typically +use of HDF5 API calls without any explicit synchronization typically required in a non-threadsafe environment.

    @@ -453,7 +453,7 @@ dataset's named value.

    The main thread would join with all 16 threads and attempt to match the -resulting HDF-5 file with expected results - that each dataset contains +resulting HDF5 file with expected results - that each dataset contains the correct value (0 for zero, 1 for one etc ...) and all datasets were correctly created.

    @@ -473,7 +473,7 @@ name.

    The error stack implementation runs correctly if it reports 15 instances -of the dataset name conflict error and finally generates a correct HDF-5 +of the dataset name conflict error and finally generates a correct HDF5 containing that single dataset. Each thread should report its own stack of errors with a thread number associated with it.

    diff --git a/doxygen/examples/core_menu.md b/doxygen/examples/core_menu.md new file mode 100644 index 0000000..3fd7d11 --- /dev/null +++ b/doxygen/examples/core_menu.md @@ -0,0 +1,69 @@ +Core Library + +- @ref H5A "Attributes (H5A)" +
    +HDF5 attribute is a small metadata object describing the nature and/or intended usage of a primary data object. + +- @ref H5D "Datasets (H5D)" +
    +Manage HDF5 datasets, including the transfer of data between memory and disk and the description of dataset properties. + +- @ref H5S "Dataspaces (H5S)" +
    +HDF5 dataspaces describe the shape of datasets in memory or in HDF5 files. + +- @ref H5T "Datatypes (H5T)" +
    +HDF5 datatypes describe the element type of HDF5 datasets and attributes. + +- @ref H5E "Error Handling (H5E)" +
    +HDF5 library error reporting. + +- @ref H5ES "Event Set (H5ES)" +
    +HDF5 event set life cycle used with HDF5 VOL connectors that enable the asynchronous feature in HDF5. + +- @ref H5F "Files (H5F)" +
    +Manage HDF5 files. + +- @ref H5Z "Filters (H5Z)" +
    +Manage HDF5 user-defined filters + +- @ref H5G "Groups (H5G)" +
    +Manage HDF5 groups. + +- @ref H5I "Identifiers (H5I)" +
    +Manage identifiers defined by the HDF5 library. + +- @ref H5 "Library General (H5)" +
    +Manage the life cycle of HDF5 library instances. + +- @ref H5L "Links (H5L)" +
    +Manage HDF5 links and link types. + +- @ref H5O "Objects (H5O)" +
    +Manage HDF5 objects (groups, datasets, datatype objects). + +- @ref H5P "Property Lists (H5P)" +
    +HDF5 property lists are the main vehicle to configure the behavior of HDF5 API functions. + +- @ref H5PL "Dynamically-loaded Plugins (H5PL)" +
    +Manage the loading behavior of HDF5 plugins. + +- @ref H5R "References (H5R)" +
    +Manage HDF5 references (HDF5 objects, attributes, and selections on datasets a.k.a. dataset regions). + +- @ref H5VL "VOL Connector (H5VL)" +
    +Manage HDF5 VOL connector plugins. diff --git a/doxygen/examples/fortran_menu.md b/doxygen/examples/fortran_menu.md new file mode 100644 index 0000000..8ef4ead --- /dev/null +++ b/doxygen/examples/fortran_menu.md @@ -0,0 +1,73 @@ +Fortran Library + +- @ref FH5A "Attributes (H5A)" +
    +HDF5 attribute is a small metadata object describing the nature and/or intended usage of a primary data object. + +- @ref FH5D "Datasets (H5D)" +
    +Manage HDF5 datasets, including the transfer of data between memory and disk and the description of dataset properties. + +- @ref FH5S "Dataspaces (H5S)" +
    +HDF5 dataspaces describe the shape of datasets in memory or in HDF5 files. + +- @ref FH5T "Datatypes (H5T)" +
    +HDF5 datatypes describe the element type of HDF5 datasets and attributes. + +- @ref FH5E "Error Handling (H5E)" +
    +HDF5 library error reporting. + +- @ref FH5F "Files (H5F)" +
    +Manage HDF5 files. + +- @ref FH5Z "Filters (H5Z)" +
    +Manage HDF5 user-defined filters + +- @ref FH5G "Groups (H5G)" +
    +Manage HDF5 groups. + +- @ref FH5I "Identifiers (H5I)" +
    +Manage identifiers defined by the HDF5 library. + +- @ref FH5 "Library General (H5)" +
    +Manage the life cycle of HDF5 library instances. + +- @ref FH5L "Links (H5L)" +
    +Manage HDF5 links and link types. + +- @ref FH5O "Objects (H5O)" +
    +Manage HDF5 objects (groups, datasets, datatype objects). + +- @ref FH5P "Property Lists (H5P)" +
    +HDF5 property lists are the main vehicle to configure the behavior of HDF5 API functions. + +- @ref FH5R "References (H5R)" +
    +Manage HDF5 references (HDF5 objects, attributes, and selections on datasets a.k.a. dataset regions). + +- @ref FH5LT "High Level Lite (H5LT)" +
    +Functions to simplify creating and manipulating datasets, attributes and other features + +- @ref FH5IM "High Level Image (H5IM)" +
    +Creating and manipulating HDF5 datasets intended to be interpreted as images + +- @ref FH5TB "High Level Table (H5TB)" +
    +Creating and manipulating HDF5 datasets intended to be interpreted as tables + +- @ref FH5DS "High Level Dimension Scale (H5DS)" +
    +Creating and manipulating HDF5 datasets that are associated with the dimension of another HDF5 dataset diff --git a/doxygen/examples/high_level_menu.md b/doxygen/examples/high_level_menu.md new file mode 100644 index 0000000..d209bf4 --- /dev/null +++ b/doxygen/examples/high_level_menu.md @@ -0,0 +1,30 @@ +High-level library +
    +The high-level HDF5 library includes several sets of convenience and standard-use APIs to +facilitate common HDF5 operations. + +- @ref H5LT +
    +Functions to simplify creating and manipulating datasets, attributes and other features + +- @ref H5IM +
    +Creating and manipulating HDF5 datasets intended to be interpreted as images + +- @ref H5TB +
    +Creating and manipulating HDF5 datasets intended to be interpreted as tables + +- @ref H5PT +
    +Creating and manipulating HDF5 datasets to support append- and read-only operations on table data + +- @ref H5DS +
    +Creating and manipulating HDF5 datasets that are associated with the dimension of another HDF5 dataset + +- @ref H5DO +
    +Bypassing default HDF5 behavior in order to optimize for specific use cases + +- @ref H5LR "Extensions (H5LR, H5LT)" diff --git a/doxygen/examples/java_menu.md b/doxygen/examples/java_menu.md new file mode 100644 index 0000000..1236838 --- /dev/null +++ b/doxygen/examples/java_menu.md @@ -0,0 +1,84 @@ +Java Library + @ref HDF5LIB + +- @ref JH5 +
    +This package is the Java interface for the HDF5 library. + +- @ref JH5A +
    +This package is the Java interface for the HDF5 library attribute APIs. + +- @ref JH5D +
    +This package is the Java interface for the HDF5 library dataset APIs. + +- @ref JH5S +
    +This package is the Java interface for the HDF5 library dataspace APIs. + +- @ref JH5T +
    +This package is the Java interface for the HDF5 library datatype APIs. + +- @ref JH5E +
    +This package is the Java interface for the HDF5 library error APIs. + +- @ref JH5F +
    +This package is the Java interface for the HDF5 library file APIs. + +- @ref JH5Z +
    +This package is the Java interface for the HDF5 library filter APIs. + +- @ref JH5G +
    +This package is the Java interface for the HDF5 library group APIs. + +- @ref JH5I +
    +This package is the Java interface for the HDF5 library identifier APIs. + +- @ref JH5L +
    +This package is the Java interface for the HDF5 library links APIs. + +- @ref JH5O +
    +This package is the Java interface for the HDF5 library object APIs. + +- @ref JH5P +
    +This package is the Java interface for the HDF5 library property list APIs. + +- @ref JH5PL +
    +This package is the Java interface for the HDF5 library plugin APIs. + +- @ref JH5R +
    +This package is the Java interface for the HDF5 library reference APIs. + +- @ref JH5VL +
    +This package is the Java interface for the HDF5 library VOL connector APIs. + +- @ref HDF5CONST +
    +This class contains C constants and enumerated types of HDF5 library. + +- @ref HDFNATIVE +
    +This class encapsulates native methods to deal with arrays of numbers, + * converting from numbers to bytes and bytes to numbers. + +- @ref HDFARRAY +
    +This is a class for handling multidimensional arrays for HDF. + +- @ref ERRORS +
    +The class HDF5Exception returns errors from the Java HDF5 Interface. + \ No newline at end of file diff --git a/doxygen/hdf5doxy_layout.xml b/doxygen/hdf5doxy_layout.xml index 24642b5..f7c47bf 100644 --- a/doxygen/hdf5doxy_layout.xml +++ b/doxygen/hdf5doxy_layout.xml @@ -3,8 +3,9 @@ - + + diff --git a/doxygen/img/DataGroup.png b/doxygen/img/DataGroup.png new file mode 100644 index 0000000..4edeba3 Binary files /dev/null and b/doxygen/img/DataGroup.png differ diff --git a/doxygen/img/Dmodel_fig1.gif b/doxygen/img/Dmodel_fig1.gif new file mode 100644 index 0000000..ca8093c Binary files /dev/null and b/doxygen/img/Dmodel_fig1.gif differ diff --git a/doxygen/img/Dmodel_fig10.gif b/doxygen/img/Dmodel_fig10.gif new file mode 100644 index 0000000..c6a9916 Binary files /dev/null and b/doxygen/img/Dmodel_fig10.gif differ diff --git a/doxygen/img/Dmodel_fig11_b.gif b/doxygen/img/Dmodel_fig11_b.gif new file mode 100644 index 0000000..19ea9fb Binary files /dev/null and b/doxygen/img/Dmodel_fig11_b.gif differ diff --git a/doxygen/img/Dmodel_fig12_a.gif b/doxygen/img/Dmodel_fig12_a.gif new file mode 100644 index 0000000..1f597df Binary files /dev/null and b/doxygen/img/Dmodel_fig12_a.gif differ diff --git a/doxygen/img/Dmodel_fig12_b.gif b/doxygen/img/Dmodel_fig12_b.gif new file mode 100644 index 0000000..f271082 Binary files /dev/null and b/doxygen/img/Dmodel_fig12_b.gif differ diff --git a/doxygen/img/Dmodel_fig14_a.gif b/doxygen/img/Dmodel_fig14_a.gif new file mode 100644 index 0000000..45d6c6c Binary files /dev/null and b/doxygen/img/Dmodel_fig14_a.gif differ diff --git a/doxygen/img/Dmodel_fig14_b.gif b/doxygen/img/Dmodel_fig14_b.gif new file mode 100644 index 0000000..12a667d Binary files /dev/null and b/doxygen/img/Dmodel_fig14_b.gif differ diff --git a/doxygen/img/Dmodel_fig14_c.gif b/doxygen/img/Dmodel_fig14_c.gif new file mode 100644 index 0000000..0c06049 Binary files /dev/null and b/doxygen/img/Dmodel_fig14_c.gif differ diff --git 
a/doxygen/img/Dmodel_fig14_d.gif b/doxygen/img/Dmodel_fig14_d.gif new file mode 100644 index 0000000..7cb8956 Binary files /dev/null and b/doxygen/img/Dmodel_fig14_d.gif differ diff --git a/doxygen/img/Dmodel_fig2.gif b/doxygen/img/Dmodel_fig2.gif new file mode 100644 index 0000000..c2c9d04 Binary files /dev/null and b/doxygen/img/Dmodel_fig2.gif differ diff --git a/doxygen/img/Dmodel_fig3_a.gif b/doxygen/img/Dmodel_fig3_a.gif new file mode 100644 index 0000000..9f00832 Binary files /dev/null and b/doxygen/img/Dmodel_fig3_a.gif differ diff --git a/doxygen/img/Dmodel_fig3_c.gif b/doxygen/img/Dmodel_fig3_c.gif new file mode 100644 index 0000000..8529181 Binary files /dev/null and b/doxygen/img/Dmodel_fig3_c.gif differ diff --git a/doxygen/img/Dmodel_fig4_a.gif b/doxygen/img/Dmodel_fig4_a.gif new file mode 100644 index 0000000..c7fdce1 Binary files /dev/null and b/doxygen/img/Dmodel_fig4_a.gif differ diff --git a/doxygen/img/Dmodel_fig4_b.gif b/doxygen/img/Dmodel_fig4_b.gif new file mode 100644 index 0000000..34053d5 Binary files /dev/null and b/doxygen/img/Dmodel_fig4_b.gif differ diff --git a/doxygen/img/Dmodel_fig5.gif b/doxygen/img/Dmodel_fig5.gif new file mode 100644 index 0000000..69e11f5 Binary files /dev/null and b/doxygen/img/Dmodel_fig5.gif differ diff --git a/doxygen/img/Dmodel_fig6.gif b/doxygen/img/Dmodel_fig6.gif new file mode 100644 index 0000000..bf677c2 Binary files /dev/null and b/doxygen/img/Dmodel_fig6.gif differ diff --git a/doxygen/img/Dmodel_fig7_b.gif b/doxygen/img/Dmodel_fig7_b.gif new file mode 100644 index 0000000..da27fa0 Binary files /dev/null and b/doxygen/img/Dmodel_fig7_b.gif differ diff --git a/doxygen/img/Dmodel_fig8.gif b/doxygen/img/Dmodel_fig8.gif new file mode 100644 index 0000000..27305a8 Binary files /dev/null and b/doxygen/img/Dmodel_fig8.gif differ diff --git a/doxygen/img/Dmodel_fig9.gif b/doxygen/img/Dmodel_fig9.gif new file mode 100644 index 0000000..31893bf Binary files /dev/null and b/doxygen/img/Dmodel_fig9.gif differ 
diff --git a/doxygen/img/Dsets_NbitFloating1.gif b/doxygen/img/Dsets_NbitFloating1.gif new file mode 100644 index 0000000..3d3ce19 Binary files /dev/null and b/doxygen/img/Dsets_NbitFloating1.gif differ diff --git a/doxygen/img/Dsets_NbitFloating2.gif b/doxygen/img/Dsets_NbitFloating2.gif new file mode 100644 index 0000000..cdb5a90 Binary files /dev/null and b/doxygen/img/Dsets_NbitFloating2.gif differ diff --git a/doxygen/img/Dsets_NbitInteger1.gif b/doxygen/img/Dsets_NbitInteger1.gif new file mode 100644 index 0000000..656fb8d Binary files /dev/null and b/doxygen/img/Dsets_NbitInteger1.gif differ diff --git a/doxygen/img/Dsets_NbitInteger2.gif b/doxygen/img/Dsets_NbitInteger2.gif new file mode 100644 index 0000000..e100ebe Binary files /dev/null and b/doxygen/img/Dsets_NbitInteger2.gif differ diff --git a/doxygen/img/Dsets_fig1.gif b/doxygen/img/Dsets_fig1.gif new file mode 100644 index 0000000..c8f3349 Binary files /dev/null and b/doxygen/img/Dsets_fig1.gif differ diff --git a/doxygen/img/Dsets_fig10.gif b/doxygen/img/Dsets_fig10.gif new file mode 100644 index 0000000..4593cc1 Binary files /dev/null and b/doxygen/img/Dsets_fig10.gif differ diff --git a/doxygen/img/Dsets_fig11.gif b/doxygen/img/Dsets_fig11.gif new file mode 100644 index 0000000..573701a Binary files /dev/null and b/doxygen/img/Dsets_fig11.gif differ diff --git a/doxygen/img/Dsets_fig12.gif b/doxygen/img/Dsets_fig12.gif new file mode 100644 index 0000000..d9ddd2b Binary files /dev/null and b/doxygen/img/Dsets_fig12.gif differ diff --git a/doxygen/img/Dsets_fig2.gif b/doxygen/img/Dsets_fig2.gif new file mode 100644 index 0000000..8ecc2c7 Binary files /dev/null and b/doxygen/img/Dsets_fig2.gif differ diff --git a/doxygen/img/Dsets_fig3.gif b/doxygen/img/Dsets_fig3.gif new file mode 100644 index 0000000..642715e Binary files /dev/null and b/doxygen/img/Dsets_fig3.gif differ diff --git a/doxygen/img/Dsets_fig4.gif b/doxygen/img/Dsets_fig4.gif new file mode 100644 index 0000000..a24ccc9 Binary files 
/dev/null and b/doxygen/img/Dsets_fig4.gif differ diff --git a/doxygen/img/Dsets_fig5.gif b/doxygen/img/Dsets_fig5.gif new file mode 100644 index 0000000..78c953e Binary files /dev/null and b/doxygen/img/Dsets_fig5.gif differ diff --git a/doxygen/img/Dsets_fig6.gif b/doxygen/img/Dsets_fig6.gif new file mode 100644 index 0000000..ea15564 Binary files /dev/null and b/doxygen/img/Dsets_fig6.gif differ diff --git a/doxygen/img/Dsets_fig7.gif b/doxygen/img/Dsets_fig7.gif new file mode 100644 index 0000000..f7f6b9e Binary files /dev/null and b/doxygen/img/Dsets_fig7.gif differ diff --git a/doxygen/img/Dsets_fig8.gif b/doxygen/img/Dsets_fig8.gif new file mode 100644 index 0000000..91cb6aa Binary files /dev/null and b/doxygen/img/Dsets_fig8.gif differ diff --git a/doxygen/img/Dsets_fig9.gif b/doxygen/img/Dsets_fig9.gif new file mode 100644 index 0000000..802ca52 Binary files /dev/null and b/doxygen/img/Dsets_fig9.gif differ diff --git a/doxygen/img/Dspace_CvsF1.gif b/doxygen/img/Dspace_CvsF1.gif new file mode 100644 index 0000000..716b9f1 Binary files /dev/null and b/doxygen/img/Dspace_CvsF1.gif differ diff --git a/doxygen/img/Dspace_CvsF2.gif b/doxygen/img/Dspace_CvsF2.gif new file mode 100644 index 0000000..716b9f1 Binary files /dev/null and b/doxygen/img/Dspace_CvsF2.gif differ diff --git a/doxygen/img/Dspace_CvsF3.gif b/doxygen/img/Dspace_CvsF3.gif new file mode 100644 index 0000000..59c31ff Binary files /dev/null and b/doxygen/img/Dspace_CvsF3.gif differ diff --git a/doxygen/img/Dspace_CvsF4.gif b/doxygen/img/Dspace_CvsF4.gif new file mode 100644 index 0000000..e97b006 Binary files /dev/null and b/doxygen/img/Dspace_CvsF4.gif differ diff --git a/doxygen/img/Dspace_combine.gif b/doxygen/img/Dspace_combine.gif new file mode 100644 index 0000000..8da2397 Binary files /dev/null and b/doxygen/img/Dspace_combine.gif differ diff --git a/doxygen/img/Dspace_complex.gif b/doxygen/img/Dspace_complex.gif new file mode 100644 index 0000000..53e92ee Binary files /dev/null and 
b/doxygen/img/Dspace_complex.gif differ diff --git a/doxygen/img/Dspace_features.gif b/doxygen/img/Dspace_features.gif new file mode 100644 index 0000000..d94b4e4 Binary files /dev/null and b/doxygen/img/Dspace_features.gif differ diff --git a/doxygen/img/Dspace_features_cmpd.gif b/doxygen/img/Dspace_features_cmpd.gif new file mode 100644 index 0000000..f24ee99 Binary files /dev/null and b/doxygen/img/Dspace_features_cmpd.gif differ diff --git a/doxygen/img/Dspace_move.gif b/doxygen/img/Dspace_move.gif new file mode 100644 index 0000000..5debd75 Binary files /dev/null and b/doxygen/img/Dspace_move.gif differ diff --git a/doxygen/img/Dspace_point.gif b/doxygen/img/Dspace_point.gif new file mode 100644 index 0000000..92ad3a8 Binary files /dev/null and b/doxygen/img/Dspace_point.gif differ diff --git a/doxygen/img/Dspace_read.gif b/doxygen/img/Dspace_read.gif new file mode 100644 index 0000000..28c67f4 Binary files /dev/null and b/doxygen/img/Dspace_read.gif differ diff --git a/doxygen/img/Dspace_select.gif b/doxygen/img/Dspace_select.gif new file mode 100644 index 0000000..b9f4851 Binary files /dev/null and b/doxygen/img/Dspace_select.gif differ diff --git a/doxygen/img/Dspace_separate.gif b/doxygen/img/Dspace_separate.gif new file mode 100644 index 0000000..ba4ba8c Binary files /dev/null and b/doxygen/img/Dspace_separate.gif differ diff --git a/doxygen/img/Dspace_simple.gif b/doxygen/img/Dspace_simple.gif new file mode 100644 index 0000000..ff3eca5 Binary files /dev/null and b/doxygen/img/Dspace_simple.gif differ diff --git a/doxygen/img/Dspace_subset.gif b/doxygen/img/Dspace_subset.gif new file mode 100644 index 0000000..b353175 Binary files /dev/null and b/doxygen/img/Dspace_subset.gif differ diff --git a/doxygen/img/Dspace_three_datasets.gif b/doxygen/img/Dspace_three_datasets.gif new file mode 100644 index 0000000..4af222f Binary files /dev/null and b/doxygen/img/Dspace_three_datasets.gif differ diff --git a/doxygen/img/Dspace_transfer.gif 
b/doxygen/img/Dspace_transfer.gif new file mode 100644 index 0000000..7de0231 Binary files /dev/null and b/doxygen/img/Dspace_transfer.gif differ diff --git a/doxygen/img/Dspace_write1to2.gif b/doxygen/img/Dspace_write1to2.gif new file mode 100644 index 0000000..5735bc7 Binary files /dev/null and b/doxygen/img/Dspace_write1to2.gif differ diff --git a/doxygen/img/Dtypes_fig1.gif b/doxygen/img/Dtypes_fig1.gif new file mode 100644 index 0000000..484f54f Binary files /dev/null and b/doxygen/img/Dtypes_fig1.gif differ diff --git a/doxygen/img/Dtypes_fig10.gif b/doxygen/img/Dtypes_fig10.gif new file mode 100644 index 0000000..60c8ba9 Binary files /dev/null and b/doxygen/img/Dtypes_fig10.gif differ diff --git a/doxygen/img/Dtypes_fig11.gif b/doxygen/img/Dtypes_fig11.gif new file mode 100644 index 0000000..b5eda71 Binary files /dev/null and b/doxygen/img/Dtypes_fig11.gif differ diff --git a/doxygen/img/Dtypes_fig12.gif b/doxygen/img/Dtypes_fig12.gif new file mode 100644 index 0000000..ee911b7 Binary files /dev/null and b/doxygen/img/Dtypes_fig12.gif differ diff --git a/doxygen/img/Dtypes_fig13a.gif b/doxygen/img/Dtypes_fig13a.gif new file mode 100644 index 0000000..2f47b71 Binary files /dev/null and b/doxygen/img/Dtypes_fig13a.gif differ diff --git a/doxygen/img/Dtypes_fig13b.gif b/doxygen/img/Dtypes_fig13b.gif new file mode 100644 index 0000000..fe3b5fb Binary files /dev/null and b/doxygen/img/Dtypes_fig13b.gif differ diff --git a/doxygen/img/Dtypes_fig13c.gif b/doxygen/img/Dtypes_fig13c.gif new file mode 100644 index 0000000..afd2834 Binary files /dev/null and b/doxygen/img/Dtypes_fig13c.gif differ diff --git a/doxygen/img/Dtypes_fig13d.gif b/doxygen/img/Dtypes_fig13d.gif new file mode 100644 index 0000000..48805d8 Binary files /dev/null and b/doxygen/img/Dtypes_fig13d.gif differ diff --git a/doxygen/img/Dtypes_fig14.gif b/doxygen/img/Dtypes_fig14.gif new file mode 100644 index 0000000..8f4d787 Binary files /dev/null and b/doxygen/img/Dtypes_fig14.gif differ diff --git 
a/doxygen/img/Dtypes_fig15.gif b/doxygen/img/Dtypes_fig15.gif new file mode 100644 index 0000000..82a34d0 Binary files /dev/null and b/doxygen/img/Dtypes_fig15.gif differ diff --git a/doxygen/img/Dtypes_fig16.gif b/doxygen/img/Dtypes_fig16.gif new file mode 100644 index 0000000..e83d379 Binary files /dev/null and b/doxygen/img/Dtypes_fig16.gif differ diff --git a/doxygen/img/Dtypes_fig16a.gif b/doxygen/img/Dtypes_fig16a.gif new file mode 100644 index 0000000..7e68cc0 Binary files /dev/null and b/doxygen/img/Dtypes_fig16a.gif differ diff --git a/doxygen/img/Dtypes_fig16b.gif b/doxygen/img/Dtypes_fig16b.gif new file mode 100644 index 0000000..b7919be Binary files /dev/null and b/doxygen/img/Dtypes_fig16b.gif differ diff --git a/doxygen/img/Dtypes_fig16c.gif b/doxygen/img/Dtypes_fig16c.gif new file mode 100644 index 0000000..cca285a Binary files /dev/null and b/doxygen/img/Dtypes_fig16c.gif differ diff --git a/doxygen/img/Dtypes_fig16d.gif b/doxygen/img/Dtypes_fig16d.gif new file mode 100644 index 0000000..8ca0fd7 Binary files /dev/null and b/doxygen/img/Dtypes_fig16d.gif differ diff --git a/doxygen/img/Dtypes_fig17a.gif b/doxygen/img/Dtypes_fig17a.gif new file mode 100644 index 0000000..cdfaa29 Binary files /dev/null and b/doxygen/img/Dtypes_fig17a.gif differ diff --git a/doxygen/img/Dtypes_fig17b.gif b/doxygen/img/Dtypes_fig17b.gif new file mode 100644 index 0000000..4a3ba33 Binary files /dev/null and b/doxygen/img/Dtypes_fig17b.gif differ diff --git a/doxygen/img/Dtypes_fig18.gif b/doxygen/img/Dtypes_fig18.gif new file mode 100644 index 0000000..73c33e0 Binary files /dev/null and b/doxygen/img/Dtypes_fig18.gif differ diff --git a/doxygen/img/Dtypes_fig19.gif b/doxygen/img/Dtypes_fig19.gif new file mode 100644 index 0000000..38ea6d4 Binary files /dev/null and b/doxygen/img/Dtypes_fig19.gif differ diff --git a/doxygen/img/Dtypes_fig2.gif b/doxygen/img/Dtypes_fig2.gif new file mode 100644 index 0000000..52285a6 Binary files /dev/null and b/doxygen/img/Dtypes_fig2.gif 
differ diff --git a/doxygen/img/Dtypes_fig20a.gif b/doxygen/img/Dtypes_fig20a.gif new file mode 100644 index 0000000..8406e77 Binary files /dev/null and b/doxygen/img/Dtypes_fig20a.gif differ diff --git a/doxygen/img/Dtypes_fig20b.gif b/doxygen/img/Dtypes_fig20b.gif new file mode 100644 index 0000000..3f2331d Binary files /dev/null and b/doxygen/img/Dtypes_fig20b.gif differ diff --git a/doxygen/img/Dtypes_fig20c.gif b/doxygen/img/Dtypes_fig20c.gif new file mode 100644 index 0000000..5b60165 Binary files /dev/null and b/doxygen/img/Dtypes_fig20c.gif differ diff --git a/doxygen/img/Dtypes_fig20d.gif b/doxygen/img/Dtypes_fig20d.gif new file mode 100644 index 0000000..fdcb59a Binary files /dev/null and b/doxygen/img/Dtypes_fig20d.gif differ diff --git a/doxygen/img/Dtypes_fig21.gif b/doxygen/img/Dtypes_fig21.gif new file mode 100644 index 0000000..6d30528 Binary files /dev/null and b/doxygen/img/Dtypes_fig21.gif differ diff --git a/doxygen/img/Dtypes_fig22.gif b/doxygen/img/Dtypes_fig22.gif new file mode 100644 index 0000000..5e2ca99 Binary files /dev/null and b/doxygen/img/Dtypes_fig22.gif differ diff --git a/doxygen/img/Dtypes_fig23.gif b/doxygen/img/Dtypes_fig23.gif new file mode 100644 index 0000000..f0c9882 Binary files /dev/null and b/doxygen/img/Dtypes_fig23.gif differ diff --git a/doxygen/img/Dtypes_fig24.gif b/doxygen/img/Dtypes_fig24.gif new file mode 100644 index 0000000..a1c28f4 Binary files /dev/null and b/doxygen/img/Dtypes_fig24.gif differ diff --git a/doxygen/img/Dtypes_fig25a.gif b/doxygen/img/Dtypes_fig25a.gif new file mode 100644 index 0000000..16d3bcc Binary files /dev/null and b/doxygen/img/Dtypes_fig25a.gif differ diff --git a/doxygen/img/Dtypes_fig25c.gif b/doxygen/img/Dtypes_fig25c.gif new file mode 100644 index 0000000..a625b74 Binary files /dev/null and b/doxygen/img/Dtypes_fig25c.gif differ diff --git a/doxygen/img/Dtypes_fig26.gif b/doxygen/img/Dtypes_fig26.gif new file mode 100644 index 0000000..24b34fb Binary files /dev/null and 
b/doxygen/img/Dtypes_fig26.gif differ diff --git a/doxygen/img/Dtypes_fig27.gif b/doxygen/img/Dtypes_fig27.gif new file mode 100644 index 0000000..71f182a Binary files /dev/null and b/doxygen/img/Dtypes_fig27.gif differ diff --git a/doxygen/img/Dtypes_fig28.gif b/doxygen/img/Dtypes_fig28.gif new file mode 100644 index 0000000..56d8d1b Binary files /dev/null and b/doxygen/img/Dtypes_fig28.gif differ diff --git a/doxygen/img/Dtypes_fig3.gif b/doxygen/img/Dtypes_fig3.gif new file mode 100644 index 0000000..993d12e Binary files /dev/null and b/doxygen/img/Dtypes_fig3.gif differ diff --git a/doxygen/img/Dtypes_fig4.gif b/doxygen/img/Dtypes_fig4.gif new file mode 100644 index 0000000..67aedef Binary files /dev/null and b/doxygen/img/Dtypes_fig4.gif differ diff --git a/doxygen/img/Dtypes_fig5.gif b/doxygen/img/Dtypes_fig5.gif new file mode 100644 index 0000000..075417d Binary files /dev/null and b/doxygen/img/Dtypes_fig5.gif differ diff --git a/doxygen/img/Dtypes_fig6.gif b/doxygen/img/Dtypes_fig6.gif new file mode 100644 index 0000000..516ab95 Binary files /dev/null and b/doxygen/img/Dtypes_fig6.gif differ diff --git a/doxygen/img/Dtypes_fig7.gif b/doxygen/img/Dtypes_fig7.gif new file mode 100644 index 0000000..c18e9dc Binary files /dev/null and b/doxygen/img/Dtypes_fig7.gif differ diff --git a/doxygen/img/Dtypes_fig8.gif b/doxygen/img/Dtypes_fig8.gif new file mode 100644 index 0000000..d75d998 Binary files /dev/null and b/doxygen/img/Dtypes_fig8.gif differ diff --git a/doxygen/img/Dtypes_fig9.gif b/doxygen/img/Dtypes_fig9.gif new file mode 100644 index 0000000..873f0ab Binary files /dev/null and b/doxygen/img/Dtypes_fig9.gif differ diff --git a/doxygen/img/Files_fig3.gif b/doxygen/img/Files_fig3.gif new file mode 100644 index 0000000..6912f5c Binary files /dev/null and b/doxygen/img/Files_fig3.gif differ diff --git a/doxygen/img/Files_fig4.gif b/doxygen/img/Files_fig4.gif new file mode 100644 index 0000000..b4ff107 Binary files /dev/null and b/doxygen/img/Files_fig4.gif 
differ diff --git a/doxygen/img/Groups_fig1.gif b/doxygen/img/Groups_fig1.gif new file mode 100644 index 0000000..193fff9 Binary files /dev/null and b/doxygen/img/Groups_fig1.gif differ diff --git a/doxygen/img/Groups_fig10_a.gif b/doxygen/img/Groups_fig10_a.gif new file mode 100644 index 0000000..6595b34 Binary files /dev/null and b/doxygen/img/Groups_fig10_a.gif differ diff --git a/doxygen/img/Groups_fig10_b.gif b/doxygen/img/Groups_fig10_b.gif new file mode 100644 index 0000000..9e7c234 Binary files /dev/null and b/doxygen/img/Groups_fig10_b.gif differ diff --git a/doxygen/img/Groups_fig10_c.gif b/doxygen/img/Groups_fig10_c.gif new file mode 100644 index 0000000..20900ac Binary files /dev/null and b/doxygen/img/Groups_fig10_c.gif differ diff --git a/doxygen/img/Groups_fig10_d.gif b/doxygen/img/Groups_fig10_d.gif new file mode 100644 index 0000000..7251919 Binary files /dev/null and b/doxygen/img/Groups_fig10_d.gif differ diff --git a/doxygen/img/Groups_fig11_a.gif b/doxygen/img/Groups_fig11_a.gif new file mode 100644 index 0000000..1d041d0 Binary files /dev/null and b/doxygen/img/Groups_fig11_a.gif differ diff --git a/doxygen/img/Groups_fig11_b.gif b/doxygen/img/Groups_fig11_b.gif new file mode 100644 index 0000000..732109b Binary files /dev/null and b/doxygen/img/Groups_fig11_b.gif differ diff --git a/doxygen/img/Groups_fig11_c.gif b/doxygen/img/Groups_fig11_c.gif new file mode 100644 index 0000000..f1444eb Binary files /dev/null and b/doxygen/img/Groups_fig11_c.gif differ diff --git a/doxygen/img/Groups_fig11_d.gif b/doxygen/img/Groups_fig11_d.gif new file mode 100644 index 0000000..ee1b740 Binary files /dev/null and b/doxygen/img/Groups_fig11_d.gif differ diff --git a/doxygen/img/Groups_fig2.gif b/doxygen/img/Groups_fig2.gif new file mode 100644 index 0000000..d14b0ff Binary files /dev/null and b/doxygen/img/Groups_fig2.gif differ diff --git a/doxygen/img/Groups_fig3.gif b/doxygen/img/Groups_fig3.gif new file mode 100644 index 0000000..aaa1fe7 Binary files 
/dev/null and b/doxygen/img/Groups_fig3.gif differ diff --git a/doxygen/img/Groups_fig4.gif b/doxygen/img/Groups_fig4.gif new file mode 100644 index 0000000..a077bf3 Binary files /dev/null and b/doxygen/img/Groups_fig4.gif differ diff --git a/doxygen/img/Groups_fig5.gif b/doxygen/img/Groups_fig5.gif new file mode 100644 index 0000000..55ddc3c Binary files /dev/null and b/doxygen/img/Groups_fig5.gif differ diff --git a/doxygen/img/Groups_fig6.gif b/doxygen/img/Groups_fig6.gif new file mode 100644 index 0000000..53a18d4 Binary files /dev/null and b/doxygen/img/Groups_fig6.gif differ diff --git a/doxygen/img/Groups_fig9_a.gif b/doxygen/img/Groups_fig9_a.gif new file mode 100644 index 0000000..af0ab69 Binary files /dev/null and b/doxygen/img/Groups_fig9_a.gif differ diff --git a/doxygen/img/Groups_fig9_aa.gif b/doxygen/img/Groups_fig9_aa.gif new file mode 100644 index 0000000..43ed356 Binary files /dev/null and b/doxygen/img/Groups_fig9_aa.gif differ diff --git a/doxygen/img/Groups_fig9_b.gif b/doxygen/img/Groups_fig9_b.gif new file mode 100644 index 0000000..b07ec9c Binary files /dev/null and b/doxygen/img/Groups_fig9_b.gif differ diff --git a/doxygen/img/Groups_fig9_bb.gif b/doxygen/img/Groups_fig9_bb.gif new file mode 100644 index 0000000..e13f534 Binary files /dev/null and b/doxygen/img/Groups_fig9_bb.gif differ diff --git a/doxygen/img/LBDsetSubRWProg.png b/doxygen/img/LBDsetSubRWProg.png new file mode 100644 index 0000000..4627740 Binary files /dev/null and b/doxygen/img/LBDsetSubRWProg.png differ diff --git a/doxygen/img/Pmodel_fig2.gif b/doxygen/img/Pmodel_fig2.gif new file mode 100644 index 0000000..8be15fb Binary files /dev/null and b/doxygen/img/Pmodel_fig2.gif differ diff --git a/doxygen/img/Pmodel_fig3.gif b/doxygen/img/Pmodel_fig3.gif new file mode 100644 index 0000000..211f2ab Binary files /dev/null and b/doxygen/img/Pmodel_fig3.gif differ diff --git a/doxygen/img/Pmodel_fig5_a.gif b/doxygen/img/Pmodel_fig5_a.gif new file mode 100644 index 
0000000..6607b1c Binary files /dev/null and b/doxygen/img/Pmodel_fig5_a.gif differ diff --git a/doxygen/img/Pmodel_fig5_b.gif b/doxygen/img/Pmodel_fig5_b.gif new file mode 100644 index 0000000..548df28 Binary files /dev/null and b/doxygen/img/Pmodel_fig5_b.gif differ diff --git a/doxygen/img/Pmodel_fig5_c.gif b/doxygen/img/Pmodel_fig5_c.gif new file mode 100644 index 0000000..459bc66 Binary files /dev/null and b/doxygen/img/Pmodel_fig5_c.gif differ diff --git a/doxygen/img/Pmodel_fig5_d.gif b/doxygen/img/Pmodel_fig5_d.gif new file mode 100644 index 0000000..207350d Binary files /dev/null and b/doxygen/img/Pmodel_fig5_d.gif differ diff --git a/doxygen/img/Pmodel_fig5_e.gif b/doxygen/img/Pmodel_fig5_e.gif new file mode 100644 index 0000000..ee4f656 Binary files /dev/null and b/doxygen/img/Pmodel_fig5_e.gif differ diff --git a/doxygen/img/Pmodel_fig6.gif b/doxygen/img/Pmodel_fig6.gif new file mode 100644 index 0000000..2dac825 Binary files /dev/null and b/doxygen/img/Pmodel_fig6.gif differ diff --git a/doxygen/img/PropListClassInheritance.gif b/doxygen/img/PropListClassInheritance.gif new file mode 100644 index 0000000..c6f0309 Binary files /dev/null and b/doxygen/img/PropListClassInheritance.gif differ diff --git a/doxygen/img/PropListEcosystem.gif b/doxygen/img/PropListEcosystem.gif new file mode 100644 index 0000000..cf77ba4 Binary files /dev/null and b/doxygen/img/PropListEcosystem.gif differ diff --git a/doxygen/img/Shared_Attribute.jpg b/doxygen/img/Shared_Attribute.jpg new file mode 100644 index 0000000..058eeec Binary files /dev/null and b/doxygen/img/Shared_Attribute.jpg differ diff --git a/doxygen/img/StormDataset.png b/doxygen/img/StormDataset.png new file mode 100644 index 0000000..da44335 Binary files /dev/null and b/doxygen/img/StormDataset.png differ diff --git a/doxygen/img/UML_Attribute.jpg b/doxygen/img/UML_Attribute.jpg new file mode 100644 index 0000000..5b3db7d Binary files /dev/null and b/doxygen/img/UML_Attribute.jpg differ diff --git 
a/doxygen/img/UML_FileAndProps.gif b/doxygen/img/UML_FileAndProps.gif new file mode 100644 index 0000000..1de96c6 Binary files /dev/null and b/doxygen/img/UML_FileAndProps.gif differ diff --git a/doxygen/img/VFL_Drivers.gif b/doxygen/img/VFL_Drivers.gif new file mode 100644 index 0000000..4b626c6 Binary files /dev/null and b/doxygen/img/VFL_Drivers.gif differ diff --git a/doxygen/img/cmpnddtype.png b/doxygen/img/cmpnddtype.png new file mode 100644 index 0000000..53b4afd Binary files /dev/null and b/doxygen/img/cmpnddtype.png differ diff --git a/doxygen/img/crtatt.png b/doxygen/img/crtatt.png new file mode 100644 index 0000000..93ac36c Binary files /dev/null and b/doxygen/img/crtatt.png differ diff --git a/doxygen/img/crtdset.png b/doxygen/img/crtdset.png new file mode 100644 index 0000000..9cc3085 Binary files /dev/null and b/doxygen/img/crtdset.png differ diff --git a/doxygen/img/crtf-pic.png b/doxygen/img/crtf-pic.png new file mode 100644 index 0000000..f7c49b8 Binary files /dev/null and b/doxygen/img/crtf-pic.png differ diff --git a/doxygen/img/crtgrp.png b/doxygen/img/crtgrp.png new file mode 100644 index 0000000..506bc68 Binary files /dev/null and b/doxygen/img/crtgrp.png differ diff --git a/doxygen/img/dataset.png b/doxygen/img/dataset.png new file mode 100644 index 0000000..1524417 Binary files /dev/null and b/doxygen/img/dataset.png differ diff --git a/doxygen/img/datasetwdata.png b/doxygen/img/datasetwdata.png new file mode 100644 index 0000000..5f03827 Binary files /dev/null and b/doxygen/img/datasetwdata.png differ diff --git a/doxygen/img/dataspace.png b/doxygen/img/dataspace.png new file mode 100644 index 0000000..95e0b7d Binary files /dev/null and b/doxygen/img/dataspace.png differ diff --git a/doxygen/img/dataspace1.png b/doxygen/img/dataspace1.png new file mode 100644 index 0000000..f21a5f5 Binary files /dev/null and b/doxygen/img/dataspace1.png differ diff --git a/doxygen/img/datatype.png b/doxygen/img/datatype.png new file mode 100644 index 
0000000..6ea5732 Binary files /dev/null and b/doxygen/img/datatype.png differ diff --git a/doxygen/img/dtypes_fig25b.gif b/doxygen/img/dtypes_fig25b.gif new file mode 100644 index 0000000..9dbc225 Binary files /dev/null and b/doxygen/img/dtypes_fig25b.gif differ diff --git a/doxygen/img/fileobj.png b/doxygen/img/fileobj.png new file mode 100644 index 0000000..ae5212d Binary files /dev/null and b/doxygen/img/fileobj.png differ diff --git a/doxygen/img/group.png b/doxygen/img/group.png new file mode 100644 index 0000000..7fec7fc Binary files /dev/null and b/doxygen/img/group.png differ diff --git a/doxygen/img/hdfview-anthrstrm-img.png b/doxygen/img/hdfview-anthrstrm-img.png new file mode 100644 index 0000000..add4e48 Binary files /dev/null and b/doxygen/img/hdfview-anthrstrm-img.png differ diff --git a/doxygen/img/hdfview-anthrstrm-sprdsht.png b/doxygen/img/hdfview-anthrstrm-sprdsht.png new file mode 100644 index 0000000..4584fd5 Binary files /dev/null and b/doxygen/img/hdfview-anthrstrm-sprdsht.png differ diff --git a/doxygen/img/hdfview-anthrstrm.png b/doxygen/img/hdfview-anthrstrm.png new file mode 100644 index 0000000..afc2de3 Binary files /dev/null and b/doxygen/img/hdfview-anthrstrm.png differ diff --git a/doxygen/img/hdfview-imgicon.png b/doxygen/img/hdfview-imgicon.png new file mode 100644 index 0000000..f189080 Binary files /dev/null and b/doxygen/img/hdfview-imgicon.png differ diff --git a/doxygen/img/hdfview-imgprop.png b/doxygen/img/hdfview-imgprop.png new file mode 100644 index 0000000..717727b Binary files /dev/null and b/doxygen/img/hdfview-imgprop.png differ diff --git a/doxygen/img/hdfview-imgsubset.png b/doxygen/img/hdfview-imgsubset.png new file mode 100644 index 0000000..19cec57 Binary files /dev/null and b/doxygen/img/hdfview-imgsubset.png differ diff --git a/doxygen/img/hdfview-newcmpd.png b/doxygen/img/hdfview-newcmpd.png new file mode 100644 index 0000000..b07b5f8 Binary files /dev/null and b/doxygen/img/hdfview-newcmpd.png differ diff --git 
a/doxygen/img/hdfview-newimgsubset.png b/doxygen/img/hdfview-newimgsubset.png new file mode 100644 index 0000000..fd16b23 Binary files /dev/null and b/doxygen/img/hdfview-newimgsubset.png differ diff --git a/doxygen/img/hdfview-prop.png b/doxygen/img/hdfview-prop.png new file mode 100644 index 0000000..16c0904 Binary files /dev/null and b/doxygen/img/hdfview-prop.png differ diff --git a/doxygen/img/hdfview-qf.png b/doxygen/img/hdfview-qf.png new file mode 100644 index 0000000..edc371f Binary files /dev/null and b/doxygen/img/hdfview-qf.png differ diff --git a/doxygen/img/hdfview-regref.png b/doxygen/img/hdfview-regref.png new file mode 100644 index 0000000..7f2b02a Binary files /dev/null and b/doxygen/img/hdfview-regref.png differ diff --git a/doxygen/img/hdfview-regref1.png b/doxygen/img/hdfview-regref1.png new file mode 100644 index 0000000..f754931 Binary files /dev/null and b/doxygen/img/hdfview-regref1.png differ diff --git a/doxygen/img/hdfview-regref2.png b/doxygen/img/hdfview-regref2.png new file mode 100644 index 0000000..5a73c01 Binary files /dev/null and b/doxygen/img/hdfview-regref2.png differ diff --git a/doxygen/img/hdfview-regrefval.png b/doxygen/img/hdfview-regrefval.png new file mode 100644 index 0000000..e0a666b Binary files /dev/null and b/doxygen/img/hdfview-regrefval.png differ diff --git a/doxygen/img/hdfview-table.png b/doxygen/img/hdfview-table.png new file mode 100644 index 0000000..69301bc Binary files /dev/null and b/doxygen/img/hdfview-table.png differ diff --git a/doxygen/img/hdfview-tree.png b/doxygen/img/hdfview-tree.png new file mode 100644 index 0000000..8ba2621 Binary files /dev/null and b/doxygen/img/hdfview-tree.png differ diff --git a/doxygen/img/imgLBDsetCreate.gif b/doxygen/img/imgLBDsetCreate.gif new file mode 100644 index 0000000..67585ef Binary files /dev/null and b/doxygen/img/imgLBDsetCreate.gif differ diff --git a/doxygen/img/imgLBDsetSubRW11.png b/doxygen/img/imgLBDsetSubRW11.png new file mode 100644 index 
0000000..8b1df86 Binary files /dev/null and b/doxygen/img/imgLBDsetSubRW11.png differ diff --git a/doxygen/img/imgLBDsetSubRW12.png b/doxygen/img/imgLBDsetSubRW12.png new file mode 100644 index 0000000..976966a Binary files /dev/null and b/doxygen/img/imgLBDsetSubRW12.png differ diff --git a/doxygen/img/imgLBDsetSubRW31.png b/doxygen/img/imgLBDsetSubRW31.png new file mode 100644 index 0000000..31d5098 Binary files /dev/null and b/doxygen/img/imgLBDsetSubRW31.png differ diff --git a/doxygen/img/imgLBDsetSubRW32.png b/doxygen/img/imgLBDsetSubRW32.png new file mode 100644 index 0000000..f7d82fd Binary files /dev/null and b/doxygen/img/imgLBDsetSubRW32.png differ diff --git a/doxygen/img/imgLBDsetSubRW33.png b/doxygen/img/imgLBDsetSubRW33.png new file mode 100644 index 0000000..69a368b Binary files /dev/null and b/doxygen/img/imgLBDsetSubRW33.png differ diff --git a/doxygen/img/imgLBFile.gif b/doxygen/img/imgLBFile.gif new file mode 100644 index 0000000..b79c6d6 Binary files /dev/null and b/doxygen/img/imgLBFile.gif differ diff --git a/doxygen/img/imggrpcreate.gif b/doxygen/img/imggrpcreate.gif new file mode 100644 index 0000000..ac1dcf9 Binary files /dev/null and b/doxygen/img/imggrpcreate.gif differ diff --git a/doxygen/img/imggrpdsets.gif b/doxygen/img/imggrpdsets.gif new file mode 100644 index 0000000..3383dc6 Binary files /dev/null and b/doxygen/img/imggrpdsets.gif differ diff --git a/doxygen/img/imggrps.gif b/doxygen/img/imggrps.gif new file mode 100644 index 0000000..d48dbab Binary files /dev/null and b/doxygen/img/imggrps.gif differ diff --git a/doxygen/img/newgroupimage.png b/doxygen/img/newgroupimage.png new file mode 100644 index 0000000..7bc4c90 Binary files /dev/null and b/doxygen/img/newgroupimage.png differ diff --git a/doxygen/img/noattrs.png b/doxygen/img/noattrs.png new file mode 100644 index 0000000..13abcc5 Binary files /dev/null and b/doxygen/img/noattrs.png differ diff --git a/doxygen/img/properties.png b/doxygen/img/properties.png new file mode 
100644 index 0000000..083dc14 Binary files /dev/null and b/doxygen/img/properties.png differ diff --git a/doxygen/img/scarletletter.png b/doxygen/img/scarletletter.png new file mode 100644 index 0000000..7c5d2e6 Binary files /dev/null and b/doxygen/img/scarletletter.png differ diff --git a/doxygen/img/showasimage.png b/doxygen/img/showasimage.png new file mode 100644 index 0000000..8377292 Binary files /dev/null and b/doxygen/img/showasimage.png differ diff --git a/doxygen/img/storm.png b/doxygen/img/storm.png new file mode 100644 index 0000000..769b037 Binary files /dev/null and b/doxygen/img/storm.png differ diff --git a/doxygen/img/tutr-lochk.png b/doxygen/img/tutr-lochk.png new file mode 100644 index 0000000..297cd6d Binary files /dev/null and b/doxygen/img/tutr-lochk.png differ diff --git a/doxygen/img/tutr-lochks.png b/doxygen/img/tutr-lochks.png new file mode 100644 index 0000000..477fc1d Binary files /dev/null and b/doxygen/img/tutr-lochks.png differ diff --git a/doxygen/img/tutr-locons.png b/doxygen/img/tutr-locons.png new file mode 100644 index 0000000..bea5be4 Binary files /dev/null and b/doxygen/img/tutr-locons.png differ diff --git a/doxygen/img/vol_architecture.png b/doxygen/img/vol_architecture.png new file mode 100755 index 0000000..10e5596 Binary files /dev/null and b/doxygen/img/vol_architecture.png differ diff --git a/fortran/src/H5Aff.F90 b/fortran/src/H5Aff.F90 index 7612bbd..e167b7f 100644 --- a/fortran/src/H5Aff.F90 +++ b/fortran/src/H5Aff.F90 @@ -1313,7 +1313,7 @@ CONTAINS !! !! \param attr_id Identifier of an attribute to write. !! \param memtype_id Identifier of the attribute datatype (in memory). -!! \param buf Data buffer; may be a scalar or an array. +!! \param buf Data buffer; may be a scalar or an array. !! \param dims Array to hold corresponding dimension sizes of data buffer buf; !! dim(k) has value of the k-th dimension of buffer buf; values are ignored if buf is a scalar. !! 
\param hdferr \fortran_error @@ -1353,8 +1353,8 @@ CONTAINS !! !! \param attr_id Identifier of an attribute to read. !! \param memtype_id Identifier of the attribute datatype (in memory). -!! \param buf Buffer for data to be read. -!! \param dims Array to hold corresponding dimension sizes of data buffer buf; +!! \param buf Buffer for data to be read. +!! \param dims Array to hold corresponding dimension sizes of data buffer buf; !! dim(k) has value of the k-th dimension of buffer buf; values are ignored if buf is a scalar. !! \param hdferr \fortran_error !! diff --git a/fortran/src/H5Dff.F90 b/fortran/src/H5Dff.F90 index 7707083..35a959e 100644 --- a/fortran/src/H5Dff.F90 +++ b/fortran/src/H5Dff.F90 @@ -210,15 +210,15 @@ CONTAINS !! !! \brief Creates a dataset at the specified location. !! -!! \param loc_id File or group identifier. -!! \param name Dataset name. -!! \param type_id Dataset datatype identifier. -!! \param space_id Dataset dataspace identifier. -!! \param dset_id Dataset identifier. +!! \param loc_id File or group identifier +!! \param name Dataset name +!! \param type_id Dataset datatype identifier +!! \param space_id Dataset dataspace identifier +!! \param dset_id Dataset identifier !! \param hdferr \fortran_error -!! \param dcpl_id Dataset creation property list. -!! \param lcpl_id Link creation property list. -!! \param dapl_id Dataset access property list. +!! \param dcpl_id Dataset creation property list +!! \param lcpl_id Link creation property list +!! \param dapl_id Dataset access property list !! SUBROUTINE h5dcreate_f(loc_id, name, type_id, space_id, dset_id, & hdferr, dcpl_id, lcpl_id, dapl_id) @@ -279,11 +279,11 @@ CONTAINS !! !! \brief Opens an existing dataset. !! -!! \param loc_id File or group identifier. -!! \param name Dataset name. -!! \param dset_id Dataset identifier. +!! \param loc_id File or group identifier +!! \param name Dataset name +!! \param dset_id Dataset identifier !! \param hdferr \fortran_error -!! 
\param dapl_id Dataset access property list. +!! \param dapl_id Dataset access property list !! SUBROUTINE h5dopen_f(loc_id, name, dset_id, hdferr, dapl_id) IMPLICIT NONE @@ -323,7 +323,7 @@ CONTAINS !! !! \brief Closes a dataset. !! -!! \param dset_id Dataset identifier. +!! \param dset_id Dataset identifier !! \param hdferr \fortran_error !! SUBROUTINE h5dclose_f(dset_id, hdferr) @@ -349,8 +349,8 @@ CONTAINS !! \brief Returns an identifier for a copy of the datatype for a !! dataset. !! -!! \param dataset_id Dataset identifier. -!! \param datatype_id Dataspace identifier. +!! \param dataset_id Dataset identifier +!! \param datatype_id Dataspace identifier !! \param hdferr \fortran_error !! SUBROUTINE h5dget_type_f(dataset_id, datatype_id, hdferr) @@ -376,8 +376,8 @@ CONTAINS !! !! \brief Extends a dataset with unlimited dimension. !! -!! \param dataset_id Dataset identifier. -!! \param size Array containing the new magnitude of each dimension. +!! \param dataset_id Dataset identifier +!! \param size Array containing the new magnitude of each dimension !! \param hdferr \fortran_error !! SUBROUTINE h5dset_extent_f(dataset_id, size, hdferr) @@ -403,8 +403,8 @@ CONTAINS !! !! \brief Returns an identifier for a copy of the dataset creation property list for a dataset. !! -!! \param dataset_id Dataset identifier. -!! \param plist_id Creation property list identifier. +!! \param dataset_id Dataset identifier +!! \param plist_id Creation property list identifier !! \param hdferr \fortran_error !! SUBROUTINE h5dget_create_plist_f(dataset_id, plist_id, hdferr) @@ -429,8 +429,8 @@ CONTAINS !! !! \brief Returns the amount of storage requires by a dataset !! -!! \param dataset_id Dataset identifier. -!! \param size Datastorage size. +!! \param dataset_id Dataset identifier +!! \param size Datastorage size !! \param hdferr \fortran_error !! SUBROUTINE h5dget_storage_size_f(dataset_id, size, hdferr) @@ -455,10 +455,10 @@ CONTAINS !! !! 
\brief Returns maximum length of the VL array elements !! -!! \param dataset_id Dataset identifier. -!! \param type_id Datatype identifier. -!! \param space_id Dataspace identifier. -!! \param len Buffer size. +!! \param dataset_id Dataset identifier +!! \param type_id Datatype identifier +!! \param space_id Dataspace identifier +!! \param len Buffer size !! \param hdferr \fortran_error !! SUBROUTINE h5dvlen_get_max_len_f(dataset_id, type_id, space_id, len, hdferr) @@ -487,7 +487,7 @@ CONTAINS !! !! \brief Returns the status of data space allocation. !! -!! \param dset_id Dataset identifier. +!! \param dset_id Dataset identifier !! \param flag Status; may have one of the following values: !! \li H5D_SPACE_STS_ERROR_F !! \li H5D_SPACE_STS_NOT_ALLOCATED_F diff --git a/fortran/src/H5Fff.F90 b/fortran/src/H5Fff.F90 index c58cb9e..817dab0 100644 --- a/fortran/src/H5Fff.F90 +++ b/fortran/src/H5Fff.F90 @@ -4,6 +4,10 @@ !! !! @see @ref H5F_UG, User Guide !! + +!> @ingroup FH5F +!! +!! @brief This module contains Fortran interfaces for H5F functions. ! ! COPYRIGHT ! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * @@ -628,7 +632,7 @@ CONTAINS !! \param file_id Target file identifier. !! \param buf_ptr Pointer to the buffer into which the image of the HDF5 file is to be copied. !! \param buf_len Size of the supplied buffer. -!! \param hdferr Error code: 0 on success and -1 on failure. +!! \param hdferr \fortran_error !! \param buf_size Returns the size in bytes of the buffer required to store the file image, no data will be copied. !! SUBROUTINE h5fget_file_image_f(file_id, buf_ptr, buf_len, hdferr, buf_size) @@ -673,7 +677,7 @@ CONTAINS !! !! \param file_id Target file identifier. !! \param minimize Value of the setting. -!! \param hdferr Error code: 0 on success and -1 on failure. +!! \param hdferr \fortran_error !! SUBROUTINE h5fget_dset_no_attrs_hint_f(file_id, minimize, hdferr) IMPLICIT NONE @@ -706,7 +710,7 @@ CONTAINS !! !! 
\param file_id Target file identifier. !! \param minimize Value of the setting. -!! \param hdferr Error code: 0 on success and -1 on failure. +!! \param hdferr \fortran_error !! SUBROUTINE h5fset_dset_no_attrs_hint_f(file_id, minimize, hdferr) IMPLICIT NONE diff --git a/fortran/src/H5Lff.F90 b/fortran/src/H5Lff.F90 index 0b8af8b..2b4e569 100644 --- a/fortran/src/H5Lff.F90 +++ b/fortran/src/H5Lff.F90 @@ -925,10 +925,10 @@ CONTAINS !! \li H5_ITER_INC_F - Increasing order !! \li H5_ITER_DEC_F - Decreasing order !! \li H5_ITER_NATIVE_F - Fastest available order -!! \param idx Iteration position at which to start. +!! \param idx Iteration position at which to start, or
    +!! Position at which an interrupted iteration may be restarted !! \param op Callback function passing data regarding the link to the calling application. !! \param op_data User-defined pointer to data required by the application for its processing of the link. -!! \param idx Position at which an interrupted iteration may be restarted. !! \param return_value Return context: !! \li Success: The return value of the first operator that !! returns non-zero, or zero if all members were processed with no operator returning non-zero. @@ -983,10 +983,11 @@ CONTAINS !! \li H5_INDEX_NAME_F - Alphanumeric index on name !! \li H5_INDEX_CRT_ORDER_F - Index on creation order !! \param order Order within index: -!! \li H5_ITER_INC_F - Increasing order -!! \li H5_ITER_DEC_F - Decreasing order -!! \li H5_ITER_NATIVE_F - Fastest available order -!! \param idx Position at which an interrupted iteration may be restarted. +!! \li H5_ITER_INC_F - Increasing order +!! \li H5_ITER_DEC_F - Decreasing order +!! \li H5_ITER_NATIVE_F - Fastest available order +!! \param idx Iteration position at which to start, or
    +!! Position at which an interrupted iteration may be restarted !! \param op Callback function passing data regarding the link to the calling application. !! \param op_data User-defined pointer to data required by the application for its processing of the link. !! \param return_value Return context: @@ -995,7 +996,7 @@ CONTAINS !! \li Failure: Negative if something goes wrong within the !! library, or the negative value returned by one of the operators. !! \param hdferr \fortran_error -!! \param lapl_id Link access property list. +!! \param lapl_id Link access property list !! SUBROUTINE h5literate_by_name_f(loc_id, group_name, index_type, order, & idx, op, op_data, return_value, hdferr, lapl_id) diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index 2139271..388e30e 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -285,6 +285,7 @@ CONTAINS hdferr = h5oopen_by_token_c(loc_id, token, obj_id) END SUBROUTINE h5oopen_by_token_f + !> !! \ingroup FH5O !! diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index dbae328..e55dc58 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -677,8 +677,8 @@ CONTAINS !! !! \brief Sets the size of the parameter used to control the B-trees for indexing chunked datasets !! -!! \param prp_id File creation property list identifier. -!! \param ik 1/2 rank of chunked storage B-tree. +!! \param prp_id File creation property list identifier +!! \param ik 1/2 rank of chunked storage B-tree !! \param hdferr \fortran_error !! SUBROUTINE h5pset_istore_k_f (prp_id, ik, hdferr) @@ -934,7 +934,7 @@ CONTAINS !! !! \param prp_id File access property list identifier. !! \param memb_size Size in bytes of each file member. -!! \param memb_plist Identifier of the file access property list to be used for each family member. +!! \param memb_plist Identifier of the file access property list to be used for each family member !! \param hdferr \fortran_error !! 
SUBROUTINE h5pset_fapl_family_f(prp_id, memb_size, memb_plist , hdferr) @@ -964,7 +964,7 @@ CONTAINS !! !! \param prp_id File access property list identifier. !! \param memb_size Size in bytes of each file member. -!! \param memb_plist Identifier of the file access property list to be used for each family member. +!! \param memb_plist Identifier of the file access property list to be used for each family member !! \param hdferr \fortran_error !! SUBROUTINE h5pget_fapl_family_f(prp_id, memb_size, memb_plist , hdferr) @@ -1029,8 +1029,8 @@ CONTAINS !! \brief Queries the meta data cache and raw data chunk cache parameters. !! !! \param prp_id File access property list identifier. -!! \param mdc_nelmts Number of elements (objects) in the metadata cache. -!! \param rdcc_nelmts Number of elements (objects) in the raw data chunk cache. +!! \param mdc_nelmts Number of elements (objects) in the metadata cache +!! \param rdcc_nelmts Number of elements (objects) in the raw data chunk cache !! \param rdcc_nbytes Total size of the raw data chunk cache, in bytes. !! \param rdcc_w0 Preemption policy (0 or 1). !! \param hdferr \fortran_error @@ -1285,8 +1285,8 @@ CONTAINS !! !! \brief Returns information about a filter in a pipeline !! -!! \param prp_id Data creation or transfer property list identifier. -!! \param filter_number Sequence number within the filter pipeline of the filter for which information is sought. +!! \param prp_id Data creation or transfer property list identifier +!! \param filter_number Sequence number within the filter pipeline of the filter for which information is sought !! \param filter_id Filter identification number. !! \param flags Bitbit vector specifying certain general properties of the filter. !! \param cd_nelmts Number of elements in cd_values. @@ -2724,9 +2724,9 @@ CONTAINS !! !! \brief Returns information about a filter in a pipeline !! -!! \param prp_id Data creation or transfer property list identifier. +!! 
\param prp_id Data creation or transfer property list identifier !! \param filter_id Filter identifier. -!! \param flags Bit vector specifying certain general properties of the filter. +!! \param flags Bit vector specifying certain general properties of the filter !! \param cd_nelmts Number of elements in cd_values. !! \param cd_values Auxiliary data for the filter. !! \param namelen Number of characters in the name buffer. @@ -2769,9 +2769,9 @@ CONTAINS !! !! \brief Adds a filter to the filter pipeline. !! -!! \param prp_id Data creation or transfer property list identifier. +!! \param prp_id Data creation or transfer property list identifier !! \param filter Filter to be modified. -!! \param flags Bit vector specifying certain general properties of the filter. +!! \param flags Bit vector specifying certain general properties of the filter !! \param cd_nelmts Number of elements in cd_values. !! \param cd_values Auxiliary data for the filter. !! \param hdferr \fortran_error @@ -2805,7 +2805,7 @@ CONTAINS !! !! \brief Delete one or more filters from the filter pipeline. !! -!! \param prp_id Data creation or transfer property list identifier. +!! \param prp_id Data creation or transfer property list identifier !! \param filter Filter to be removed. !! \param hdferr \fortran_error !! @@ -3379,7 +3379,7 @@ CONTAINS !! Success: Actual length of the expression. If provided buffer "expression" is !! smaller, than expression will be truncated to fit into provided user buffer. !! Failure: -1 -!! \param size Registered size of the transform expression. +!! \param size Registered size of the transform expression !! SUBROUTINE h5pget_data_transform_f(plist_id, expression, hdferr, size) IMPLICIT NONE diff --git a/fortran/src/H5Rff.F90 b/fortran/src/H5Rff.F90 index d373158..f5dfb5c 100644 --- a/fortran/src/H5Rff.F90 +++ b/fortran/src/H5Rff.F90 @@ -222,8 +222,7 @@ CONTAINS !! \param space_id Dataspace identifier. !! \param hdferr \fortran_error !! 
-SUBROUTINE h5rget_region_region_f(dset_id, ref, space_id, hdferr) - + SUBROUTINE h5rget_region_region_f(dset_id, ref, space_id, hdferr) IMPLICIT NONE INTEGER(HID_T), INTENT(IN) :: dset_id TYPE(hdset_reg_ref_t_f), INTENT(IN) :: ref @@ -246,6 +245,7 @@ SUBROUTINE h5rget_region_region_f(dset_id, ref, space_id, hdferr) hdferr = h5rget_region_region_c(dset_id, ref_f, space_id ) END SUBROUTINE h5rget_region_region_f + !> !! \ingroup FH5R !! @@ -307,6 +307,7 @@ SUBROUTINE h5rget_region_region_f(dset_id, ref, space_id, hdferr) hdferr = h5rcreate_ptr_c(f_ptr, loc_id, name, namelen, INT(0), INT(-1,HID_T)) END SUBROUTINE h5rcreate_object_f + !> !! \ingroup FH5R !! @@ -396,7 +397,7 @@ SUBROUTINE h5rget_region_region_f(dset_id, ref, space_id, hdferr) !> !! \ingroup FH5R !! -!! \brief Opens the HDF5 object referenced. +!! \brief Opens the HDF5 object referenced !! !! \note \fortran_obsolete !! @@ -422,7 +423,7 @@ SUBROUTINE h5rget_region_region_f(dset_id, ref, space_id, hdferr) !> !! \ingroup FH5R !! -!! \brief Opens the dataset region. +!! \brief Opens the dataset region !! !! \note \fortran_obsolete !! diff --git a/fortran/src/H5Sff.F90 b/fortran/src/H5Sff.F90 index 19a5b1f..6599863 100644 --- a/fortran/src/H5Sff.F90 +++ b/fortran/src/H5Sff.F90 @@ -270,7 +270,7 @@ CONTAINS !! \brief Gets the number of element points in the current selection !! !! \param space_id Dataspace identifier. -!! \param num_points Number of element points in the current dataspace selection. +!! \param num_points Number of element points in the current dataspace selection !! \param hdferr \fortran_error !! SUBROUTINE h5sget_select_elem_npoints_f(space_id, num_points, hdferr) @@ -443,7 +443,7 @@ CONTAINS !! !! \brief Verifies that the selection is within the extent of the dataspace. !! -!! \param space_id Identifier for the dataspace for whichselection is verified. +!! \param space_id Identifier for the dataspace for which selection is verified !! 
\param status TRUE if the selection is contained within the extent, FALSE otherwise. !! \param hdferr \fortran_error !! diff --git a/fortran/src/H5Tff.F90 b/fortran/src/H5Tff.F90 index d32b160..ceb5447 100644 --- a/fortran/src/H5Tff.F90 +++ b/fortran/src/H5Tff.F90 @@ -109,7 +109,7 @@ CONTAINS !! \brief Commits a transient datatype to a file, creating a new named datatype. !! !! \param loc_id Location identifier. -!! \param name Name of the datatype to be stored at the specified location. +!! \param name Name of the datatype to be stored at the specified location !! \param type_id Identifier of a datatype to be stored. !! \param hdferr \fortran_error !! \param lcpl_id Link creation property list. @@ -1391,7 +1391,7 @@ CONTAINS !! !! \brief Creates an array datatype object. !! -!! \param base_id Datatype identifier for the array base datatype. +!! \param base_id Datatype identifier for the array base datatype !! \param rank Rank of the array. !! \param dims Array dimension sizes. !! \param type_id Array datatype identifier. diff --git a/fortran/src/H5VLff.F90 b/fortran/src/H5VLff.F90 index 3ebd2f1..1d60848 100644 --- a/fortran/src/H5VLff.F90 +++ b/fortran/src/H5VLff.F90 @@ -1,4 +1,4 @@ -!> @defgroup FH5VL Fortran Datatype (H5VL) Interface +!> @defgroup FH5VL Fortran VOL (H5VL) Interface !! !! @see H5VL, C-API !! diff --git a/fortran/src/H5Zff.F90 b/fortran/src/H5Zff.F90 index 4fa81e9..711e26b 100644 --- a/fortran/src/H5Zff.F90 +++ b/fortran/src/H5Zff.F90 @@ -1,3 +1,10 @@ +!> @defgroup FH5Z Fortran Filter (H5Z) Interface +!! +!! @see H5Z, C-API +!! +!! @see @ref H5Z_UG, User Guide +!! + !> @ingroup FH5Z !! !! @brief This module contains Fortran interfaces for H5Z functions. diff --git a/fortran/src/H5_buildiface.F90 b/fortran/src/H5_buildiface.F90 index 090b6db..30d29ba 100644 --- a/fortran/src/H5_buildiface.F90 +++ b/fortran/src/H5_buildiface.F90 @@ -423,7 +423,7 @@ PROGRAM H5_buildiface ! buf - Data buffer; may be a scalar or an array ! ! Outputs: -! 
hdferr - Returns 0 if successful and -1 if fails +! hdferr - \fortran_error ! ! AUTHOR ! Elena Pourmal @@ -550,7 +550,7 @@ PROGRAM H5_buildiface ! ! Outputs: ! buf - Data buffer; may be a scalar or an array -! hdferr - Returns 0 if successful and -1 if fails +! hdferr - \fortran_error ! ! AUTHOR ! Elena Pourmal diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index 5968e73..07705cf 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -762,7 +762,7 @@ CONTAINS !! !! \brief Converts the KIND to the correct HDF type !! -!! \param ikind Fortran KIND parameter. +!! \param ikind Fortran KIND parameter !! \param flag Whether KIND is of type INTEGER or REAL: !! \li H5_INTEGER_KIND - integer !! \li H5_REAL_KIND - real @@ -814,8 +814,8 @@ CONTAINS !! !! \brief Computes the offset in memory !! -!! \param start Starting pointer address. -!! \param end Ending pointer address. +!! \param start Starting pointer address +!! \param end Ending pointer address !! !! \result offset Offset of a member within the derived type. !! @@ -836,7 +836,7 @@ CONTAINS !! !! \brief Convert time_t structure (C) to Fortran DATE AND TIME storage format. !! -!! \param stdtime_t Object of type time_t that contains a time value. +!! \param stdtime_t Object of type time_t that contains a time value !! \result datetime A date/time array using Fortran conventions: !! \li datetime(1) = year !! \li datetime(2) = month diff --git a/hl/fortran/src/H5DSff.F90 b/hl/fortran/src/H5DSff.F90 index bb80eb7..dcc6ed0 100644 --- a/hl/fortran/src/H5DSff.F90 +++ b/hl/fortran/src/H5DSff.F90 @@ -1,6 +1,6 @@ -!> @defgroup FH5DS Fortran High-level H5DS Interface +!> @defgroup FH5DS Fortran High Level Dimension Scales (H5DS) Interface !! -!! @see H5DS, C-API +!! @see H5DS, C-HL API !! !! @see @ref H5DS_UG, User Guide !! @@ -40,6 +40,7 @@ MODULE H5DS USE hdf5 CONTAINS + !> !! \ingroup FH5DS !! @@ -47,7 +48,7 @@ CONTAINS !! !! \param dsid The dataset to be made a Dimemsion Scale. !! 
\param errcode \fortran_error -!! \param dimname The dimension name. +!! \param dimname The dimension name !! SUBROUTINE H5DSset_scale_f( dsid, errcode, dimname) @@ -55,7 +56,7 @@ CONTAINS INTEGER(hid_t), INTENT(in) :: dsid CHARACTER(LEN=*), INTENT(in), OPTIONAL :: dimname - INTEGER :: errcode + INTEGER :: errcode INTEGER(SIZE_T) :: dimname_len ! length of dimname (if present) @@ -88,7 +89,7 @@ CONTAINS !! \param did The dataset. !! \param dsid The scale to be attached. !! \param idx The dimension of \p did that \p dsid is associated with. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE H5DSattach_scale_f( did, dsid, idx, errcode) @@ -125,7 +126,7 @@ CONTAINS !! \param did The dataset. !! \param dsid The scale to be detached. !! \param idx The dimension of \p did to detach. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE H5DSdetach_scale_f( did, dsid, idx, errcode) @@ -164,7 +165,7 @@ CONTAINS !! \param dsid The scale to be attached. !! \param idx The dimension of \p did that \p dsid is associated with. !! \param is_attached If dimension scale \p dsid is currently attached to dimension \p idx of dataset \p did. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE H5DSis_attached_f( did, dsid, idx, is_attached, errcode) @@ -213,7 +214,7 @@ CONTAINS !! !! \param did The data set to query. !! \param is_scale If is a Dimension Scale. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE H5DSis_scale_f( did, is_scale, errcode) @@ -253,7 +254,7 @@ CONTAINS !! \param did The data set. !! \param idx The dimension. !! \param label The label. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE H5DSset_label_f( did, idx, label, errcode) @@ -296,7 +297,7 @@ CONTAINS !! \param idx The dimension. !! \param label The label. !! \param size The length of the \p label buffer. -!! \param errcode \fortran_error +!! 
\param errcode \fortran_error !! SUBROUTINE H5DSget_label_f( did, idx, label, size, errcode) @@ -336,7 +337,7 @@ CONTAINS !! \param did Dimension scale identifier. !! \param name Buffer to contain the returned name. !! \param size Size in bytes, of the name buffer. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE H5DSget_scale_name_f(did, name, size, errcode) @@ -371,7 +372,7 @@ CONTAINS !! \param did The dataset to query. !! \param idx The dimension of \p did to query. !! \param num_scales Number of Dimension Scales associated with \p did. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE H5DSget_num_scales_f( did, idx, num_scales, errcode) diff --git a/hl/fortran/src/H5IMff.F90 b/hl/fortran/src/H5IMff.F90 index a2573d6..967c35d 100644 --- a/hl/fortran/src/H5IMff.F90 +++ b/hl/fortran/src/H5IMff.F90 @@ -1,6 +1,6 @@ -!> @defgroup FH5IM Fortran High-level H5IM Interface +!> @defgroup FH5IM Fortran High Level Images (H5IM) Interface !! -!! @see H5IM, C-API +!! @see H5IM, C-HL API !! !! @see @ref H5IM_UG, User Guide !! @@ -42,13 +42,13 @@ CONTAINS !> !! \ingroup FH5IM !! -!! \brief Creates and writes an image an 8 bit image. +!! \brief Creates and writes an image an 8 bit image !! !! \param loc_id Location identifier. The identifier may be that of a file or group. !! \param dset_name The name of the dataset to create. !! \param width The width of the image. -!! \param height The height of the image. -!! \param buf Buffer with data to be written to the dataset. +!! \param height The height of the image +!! \param buf Buffer with data to be written to the dataset !! \param errcode \fortran_error !! SUBROUTINE h5immake_image_8bit_f(loc_id,& @@ -105,7 +105,7 @@ CONTAINS IMPLICIT NONE - INTEGER(hid_t), INTENT(in) :: loc_id + INTEGER(hid_t), INTENT(in) :: loc_id CHARACTER(len=*), INTENT(in) :: dset_name INTEGER, INTENT(inout), DIMENSION(*) :: buf INTEGER :: errcode @@ -133,7 +133,7 @@ CONTAINS !! 
\ingroup FH5IM !! !! \brief Creates and writes an image a 24 bit image. -!! +!! !! \param loc_id Location identifier. The identifier may be that of a file or group. !! \param dset_name The name of the dataset to create. !! \param width The width of the image. @@ -307,7 +307,7 @@ CONTAINS IMPORT :: C_CHAR IMPORT :: HID_T, SIZE_T, HSIZE_T IMPLICIT NONE - INTEGER(hid_t), INTENT(in) :: loc_id + INTEGER(hid_t), INTENT(in) :: loc_id INTEGER(size_t) :: namelen CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: pal_name INTEGER(hsize_t), INTENT(in), DIMENSION(*) :: pal_dims @@ -437,7 +437,7 @@ CONTAINS IMPORT :: C_CHAR IMPORT :: HID_T, SIZE_T, HSIZE_T IMPLICIT NONE - INTEGER(hid_t), INTENT(in) :: loc_id + INTEGER(hid_t), INTENT(in) :: loc_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: image_name INTEGER(hsize_t), INTENT(inout) :: npals INTEGER(size_t) :: namelen @@ -471,7 +471,7 @@ CONTAINS CHARACTER(len=*), INTENT(in) :: image_name INTEGER, INTENT(in) :: pal_number INTEGER(hsize_t), DIMENSION(*), INTENT(inout) :: pal_dims - INTEGER :: errcode + INTEGER :: errcode INTEGER(size_t) :: namelen ! 
name length INTERFACE @@ -512,7 +512,7 @@ CONTAINS IMPLICIT NONE - INTEGER(hid_t), INTENT(in) :: loc_id + INTEGER(hid_t), INTENT(in) :: loc_id CHARACTER(len=*), INTENT(in) :: image_name INTEGER, INTENT(in) :: pal_number INTEGER, INTENT(inout), DIMENSION(*) :: pal_data @@ -525,10 +525,10 @@ CONTAINS IMPORT :: C_CHAR IMPORT :: HID_T, SIZE_T IMPLICIT NONE - INTEGER(hid_t), INTENT(in) :: loc_id + INTEGER(hid_t), INTENT(in) :: loc_id INTEGER(size_t) :: namelen CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: image_name - INTEGER, INTENT(in) :: pal_number + INTEGER, INTENT(in) :: pal_number INTEGER, INTENT(inout), DIMENSION(*) :: pal_data END FUNCTION h5imget_palette_c END INTERFACE @@ -575,7 +575,3 @@ CONTAINS END MODULE H5IM - - - - diff --git a/hl/fortran/src/H5LTff.F90 b/hl/fortran/src/H5LTff.F90 index d2e9daf..3b50ad8 100644 --- a/hl/fortran/src/H5LTff.F90 +++ b/hl/fortran/src/H5LTff.F90 @@ -1,6 +1,6 @@ -!> @defgroup FH5LT Fortran High-level H5LT Interface +!> @defgroup FH5LT Fortran High Level Lite (H5LT) Interface !! -!! @see H5LT, C-API +!! @see H5LT, C-HL API !! !! @see @ref H5LT_UG, User Guide !! @@ -98,7 +98,7 @@ MODULE H5LT_CONST CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: attr_name ! name of the attribute INTEGER(size_t), INTENT(in) :: size ! size of attribute array TYPE(C_PTR), VALUE :: buf ! data buffer - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dtype ! flag indicating the datatype of the the buffer: + CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dtype ! flag indicating the datatype of the buffer: ! R=Real, D=DOUBLE, I=Integer, C=Character INTEGER(size_t) :: SizeOf_buf ! Sizeof the buf datatype END FUNCTION h5ltset_attribute_c @@ -116,7 +116,7 @@ MODULE H5LT_CONST CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dset_name ! name of the dataset CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: attr_name ! name of the attribute TYPE(C_PTR), VALUE :: buf ! 
data buffer - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dtype ! flag indicating the datatype of the the buffer: + CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dtype ! flag indicating the datatype of the buffer: ! R=Real, D=DOUBLE, I=Integer INTEGER(size_t), INTENT(in) :: SizeOf_buf ! Sizeof the buf data type END FUNCTION h5ltget_attribute_c @@ -291,7 +291,7 @@ CONTAINS INTEGER, INTENT(in) :: rank INTEGER(hsize_t), DIMENSION(*), INTENT(in) :: dims INTEGER(hid_t), INTENT(in) :: type_id - INTEGER :: errcode + INTEGER :: errcode INTEGER(size_t) :: namelen ! name length INTEGER, INTENT(in), & DIMENSION(dims(1),dims(2),dims(3),dims(4)), TARGET :: buf ! data buffer @@ -335,7 +335,7 @@ CONTAINS INTEGER, INTENT(in) :: rank INTEGER(hsize_t), DIMENSION(*), INTENT(in) :: dims INTEGER(hid_t), INTENT(in) :: type_id - INTEGER :: errcode + INTEGER :: errcode INTEGER(size_t) :: namelen ! name length INTEGER, INTENT(in), & DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), TARGET :: buf ! data buffer @@ -377,7 +377,7 @@ CONTAINS !! !! \brief Reads a dataset of a type \p type_id. !! - !! \note \fortran_approved + !! \note \fortran_approved !! !! \param loc_id Location identifier. The identifier may be that of a file or group. !! \param dset_name The name of the dataset to create. @@ -881,7 +881,7 @@ CONTAINS INTEGER(hid_t), INTENT(in) :: loc_id CHARACTER(LEN=*), INTENT(in) :: dset_name INTEGER(hsize_t), DIMENSION(*), INTENT(in) :: dims - INTEGER :: errcode + INTEGER :: errcode INTEGER(size_t) :: namelen ! name length INTEGER, INTENT(inout), & DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5)), TARGET :: buf @@ -1039,7 +1039,7 @@ CONTAINS !> !! \ingroup FH5LT !! - !! \brief Creates and writes an attribute and is a generic replacement for data type specific + !! \brief Creates and writes an attribute and is a generic replacement for data type specific !! Fortran h5ltset_attribute_*_f APIs. There is no C equivalent API. !! !! 
\note \fortran_approved @@ -1105,7 +1105,7 @@ CONTAINS !! \param obj_name The name of the object to attach the attribute. !! \param attr_name The attribute name. !! \param buf Buffer with data to be written to the attribute. - !! \param size The size of the 1D array (one in the case of a scalar attribute). + !! \param size The size of the 1D array (one in the case of a scalar attribute). !! This value is used by H5Screate_simple() to create the dataspace. !! \param errcode \fortran_error !! @@ -1154,7 +1154,7 @@ CONTAINS !! \param obj_name The name of the object to attach the attribute. !! \param attr_name The attribute name. !! \param buf Buffer with data to be written to the attribute. - !! \param size The size of the 1D array (one in the case of a scalar attribute). + !! \param size The size of the 1D array (one in the case of a scalar attribute). !! This value is used by H5Screate_simple() to create the dataspace. !! \param errcode \fortran_error !! @@ -1202,7 +1202,7 @@ CONTAINS !! \param obj_name The name of the object to attach the attribute. !! \param attr_name The attribute name. !! \param buf Buffer with data to be written to the attribute. - !! \param size The size of the 1D array (one in the case of a scalar attribute). + !! \param size The size of the 1D array (one in the case of a scalar attribute). !! This value is used by H5Screate_simple() to create the dataspace. !! \param errcode \fortran_error !! @@ -1601,7 +1601,7 @@ CONTAINS !> !! \ingroup FH5LT !! - !! \brief Retrieves information about a dataset. + !! \brief Retrieves information about a dataset. !! !! \param loc_id Identifier of the object to locate the dataset within. !! \param dset_name The dataset name. 
@@ -1633,7 +1633,7 @@ CONTAINS IMPORT :: HID_T, SIZE_T, HSIZE_T IMPLICIT NONE INTEGER(hid_t), INTENT(in) :: loc_id - INTEGER(size_t) :: namelen + INTEGER(size_t) :: namelen CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dset_name INTEGER(hsize_t),DIMENSION(*),INTENT(inout):: dims INTEGER, INTENT(inout) :: type_class @@ -1759,7 +1759,7 @@ CONTAINS !! \brief Determines whether an HDF5 path is valid and, optionally, whether the path resolves to an HDF5 object. !! !! \param loc_id Identifier of an object in the file. - !! \param path The path to the object to check; links in path may be of any type. + !! \param path The path to the object to check; links in path may be of any type. !! \param check_object_valid Indicates whether to check if the final component of the path resolves to a valid object. !! \param path_valid Object status. !! \param errcode \fortran_error diff --git a/hl/fortran/src/H5TBff.F90 b/hl/fortran/src/H5TBff.F90 index 611d5ec..52af33f 100644 --- a/hl/fortran/src/H5TBff.F90 +++ b/hl/fortran/src/H5TBff.F90 @@ -1,6 +1,6 @@ -!> @defgroup FH5TB Fortran High-level H5TB Interface +!> @defgroup FH5TB Fortran High Level Table (H5TB) Interface !! -!! @see H5TB, C-API +!! @see H5TB, C-HL API !! !! @see @ref H5TB_UG, User Guide !! @@ -45,7 +45,7 @@ INTERFACE h5tbwrite_field_name_f #ifdef H5_DOXYGEN_FORTRAN - MODULE PROCEDURE h5tbwrite_field_name_f + MODULE PROCEDURE h5tbwrite_field_name_f #else MODULE PROCEDURE h5tbwrite_field_name_f_int MODULE PROCEDURE h5tbwrite_field_name_f_string @@ -214,12 +214,12 @@ CONTAINS !! \param field_types An array containing the type of the fields. !! \param chunk_size The chunk size. !! \param compress Flag that turns compression on or off. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! 
#ifdef H5_DOXYGEN_FORTRAN SUBROUTINE h5tbmake_table_f(& #else - SUBROUTINE h5tbmake_table_f90(& + SUBROUTINE h5tbmake_table_f90(& #endif table_title,& loc_id,& @@ -308,7 +308,7 @@ CONTAINS #ifdef H5_DOXYGEN_FORTRAN END SUBROUTINE h5tbmake_table_f #else - END SUBROUTINE h5tbmake_table_f90 + END SUBROUTINE h5tbmake_table_f90 #endif !> @@ -331,7 +331,7 @@ CONTAINS !! \param fill_data Fill values data !! \param compress Flag that turns compression on or off !! \param data Buffer with data to be written to the table -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! #ifdef H5_DOXYGEN_FORTRAN SUBROUTINE h5tbmake_table_f(& @@ -446,9 +446,9 @@ CONTAINS !! \param nfields Number of fields, i.e., size of dst_offset and dst_sizes arrays. !! \param dst_size The size of the structure type, as calculated by sizeof or storage_size !! \param dst_offset An array containing the offsets of the fields. These offsets can be calculated with H5OFFSETOF. -!! \param dst_sizes An array containing the sizes of the fields. These sizes can be calculated with sizeof or storage_size. +!! \param dst_sizes An array containing the sizes of the fields. These sizes can be calculated with sizeof or storage_size. !! \param dst_buf Pointer to buffer with data. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE h5tbread_table_f(loc_id, dset_name, nfields, dst_size, dst_offset, & dst_sizes, dst_buf, errcode) @@ -509,7 +509,7 @@ CONTAINS !! \ingroup FH5TB !! !! \brief Overwrites field. -!! +!! !! \param loc_id Location identifier. The identifier may be that of a file or group. !! \param dset_name The name of the dataset to overwrite !! 
\param field_name The names of the fields to write @@ -556,7 +556,7 @@ CONTAINS errcode = h5tbwrite_field_name_c(loc_id,namelen,dset_name,namelen1,field_name,& start,nrecords,type_size,f_ptr) #ifdef H5_DOXYGEN_FORTRAN - END SUBROUTINE h5tbwrite_field_name_f + END SUBROUTINE h5tbwrite_field_name_f #else END SUBROUTINE h5tbwrite_field_name_f_int @@ -607,7 +607,7 @@ CONTAINS !! \param nrecords The number of records to read. !! \param type_size The size in bytes of the structure associated with the table. Obtained with sizeof or storage_size. !! \param buf Buffer with data -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! SUBROUTINE h5tbread_field_name_f(& #else @@ -700,7 +700,7 @@ CONTAINS !! \param type_size The size of the structure type, as calculated by sizeof or storage_size. !! \param buf Buffer with data. !! \param errcode \fortran_error -!! +!! SUBROUTINE h5tbwrite_field_index_f(& #else SUBROUTINE h5tbwrite_field_index_f_int(& @@ -776,7 +776,7 @@ CONTAINS !> !! \ingroup FH5TB !! -!! \brief Reads field. The fields are identified by index. +!! \brief Reads field. The fields are identified by index. !! !! \param loc_id Location identifier. The identifier may be that of a file or group. !! \param dset_name The name of the dataset to read. @@ -957,7 +957,7 @@ CONTAINS field_name,& errcode ) IMPLICIT NONE - INTEGER(hid_t), INTENT(in) :: loc_id + INTEGER(hid_t), INTENT(in) :: loc_id CHARACTER(LEN=*), INTENT(in) :: dset_name CHARACTER(LEN=*), INTENT(in) :: field_name INTEGER :: errcode @@ -995,7 +995,7 @@ CONTAINS !! \param dset_name The name of the dataset to read. !! \param nfields The number of fields. !! \param nrecords The number of records. -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! 
SUBROUTINE h5tbget_table_info_f(loc_id,& dset_name,& @@ -1017,7 +1017,7 @@ CONTAINS IMPORT :: C_CHAR IMPORT :: HID_T, SIZE_T, HSIZE_T IMPLICIT NONE - INTEGER(hid_t), INTENT(in) :: loc_id + INTEGER(hid_t), INTENT(in) :: loc_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dset_name INTEGER(hsize_t), INTENT(inout):: nfields INTEGER(hsize_t), INTENT(inout):: nrecords @@ -1043,7 +1043,7 @@ CONTAINS !! \param field_offsets An array containing the offsets of the fields. !! \param type_size The size of the HDF5 datatype associated with the table !! (i.e., the size in bytes of the HDF5 compound datatype used to define a row, or record, in the table). -!! \param errcode \fortran_error +!! \param errcode \fortran_error !! \param maxlen_out Maximum character length of the field names. !! SUBROUTINE h5tbget_field_info_f(loc_id,& diff --git a/hl/src/H5DOpublic.h b/hl/src/H5DOpublic.h index 13b2422..20f4c98 100644 --- a/hl/src/H5DOpublic.h +++ b/hl/src/H5DOpublic.h @@ -18,7 +18,11 @@ extern "C" { #endif -/**\defgroup H5DO Optimizations +/** \page H5DO_UG The HDF5 High Level Optimizations + * @todo Under Construction + */ + +/**\defgroup H5DO HDF5 Optimizations APIs (H5DO) * * Bypassing default HDF5 behavior in order to optimize for specific * use cases (H5DO) diff --git a/hl/src/H5DSpublic.h b/hl/src/H5DSpublic.h index 77dd64b..4fcf681 100644 --- a/hl/src/H5DSpublic.h +++ b/hl/src/H5DSpublic.h @@ -31,7 +31,11 @@ typedef herr_t (*H5DS_iterate_t)(hid_t dset, unsigned dim, hid_t scale, void *vi extern "C" { #endif -/**\defgroup H5DS Dimension Scales +/** \page H5DS_UG The HDF5 High Level Dimension Scales + * @todo Under Construction + */ + +/**\defgroup H5DS HDF5 Dimension Scales APIs (H5DS) * * Creating and manipulating HDF5 datasets that are associated with * the dimension of another HDF5 dataset (H5DS) @@ -77,21 +81,21 @@ extern "C" { /* THIS IS A NEW ROUTINE NOT ON OLD PORTAL */ /** - * -------------------------------------------------------------------------- - * 
\ingroup H5DS + * -------------------------------------------------------------------------- + * \ingroup H5DS * - * \brief Determines if new references are used with dimension scales. + * \brief Determines if new references are used with dimension scales. * - * \param[in] obj_id Object identifier - * \param[out] with_new_ref New references are used or not + * \param[in] obj_id Object identifier + * \param[out] with_new_ref New references are used or not * - * \return \herr_t + * \return \herr_t * - * \details H5DSwith_new_ref() takes any object identifier and checks - * if new references are used for dimension scales. Currently, - * new references are used when non-native VOL connector is - * used or when H5_DIMENSION_SCALES_WITH_NEW_REF is set up - * via configure option. + * \details H5DSwith_new_ref() takes any object identifier and checks + * if new references are used for dimension scales. Currently, + * new references are used when non-native VOL connector is + * used or when H5_DIMENSION_SCALES_WITH_NEW_REF is set up + * via configure option. * */ H5_HLDLL herr_t H5DSwith_new_ref(hid_t obj_id, hbool_t *with_new_ref); diff --git a/hl/src/H5IMpublic.h b/hl/src/H5IMpublic.h index b5426d6..bccf3c4 100644 --- a/hl/src/H5IMpublic.h +++ b/hl/src/H5IMpublic.h @@ -18,7 +18,11 @@ extern "C" { #endif -/**\defgroup H5IM Images +/** \page H5IM_UG The HDF5 High Level Images + * @todo Under Construction + */ + +/**\defgroup H5IM HDF5 Images API (H5IM) * * Creating and manipulating HDF5 datasets intended to be * interpreted as images (H5IM) @@ -27,7 +31,7 @@ extern "C" { * document: \ref IMG * This version of the API is primarily concerned with two dimensional raster * data similar to HDF4 Raster Images. - * The HDF5 Images API uses the \ref H5LT HDF5 API. + * The HDF5 Images API uses the \ref H5LT. 
* * \note \Bold{Programming hints:} * \note To use any of these functions or subroutines, diff --git a/hl/src/H5LDpublic.h b/hl/src/H5LDpublic.h index fed0c1c..363b59c 100644 --- a/hl/src/H5LDpublic.h +++ b/hl/src/H5LDpublic.h @@ -34,7 +34,7 @@ extern "C" { * It will return failure if \p cur_dims is NULL. * * \note See Also: - * \note Dataset Watch functions (used with \ref h5watch): + * \note Dataset Watch functions (used with h5watch): * - H5LDget_dset_dims() * - H5LDget_dset_elmts() * - H5LDget_dset_type_size() @@ -71,7 +71,7 @@ H5_HLDLL herr_t H5LDget_dset_dims(hid_t did, hsize_t *cur_dims); * conflict with these two separators. * * \note See Also: - * \note Dataset Watch functions (used with \ref h5watch): + * \note Dataset Watch functions (used with h5watch): * - H5LDget_dset_dims() * - H5LDget_dset_elmts() * - H5LDget_dset_type_size() @@ -123,7 +123,7 @@ H5_HLDLL size_t H5LDget_dset_type_size(hid_t did, const char *fields); * two separators. * * \note See Also: - * \note Dataset Watch functions (used with \ref h5watch): + * \note Dataset Watch functions (used with h5watch): * - H5LDget_dset_dims() * - H5LDget_dset_elmts() * - H5LDget_dset_type_size() diff --git a/hl/src/H5LTpublic.h b/hl/src/H5LTpublic.h index d1684fd..15cd845 100644 --- a/hl/src/H5LTpublic.h +++ b/hl/src/H5LTpublic.h @@ -35,7 +35,11 @@ typedef enum H5LT_lang_t { extern "C" { #endif -/**\defgroup H5LT Lite +/** \page H5LT_UG The HDF5 High Level Lite + * @todo Under Construction + */ + +/**\defgroup H5LT HDF5 Lite APIs (H5LT,H5LD) * Functions used to simplify creating and manipulating datasets, * attributes and other features (H5LT, H5LD) * @@ -57,87 +61,93 @@ extern "C" { * * * *
    + * * - Dataset Functions * - Make dataset functions - * - \ref H5LTmake_dataset - * - \ref H5LTmake_dataset_char - * - \ref H5LTmake_dataset_short - * - \ref H5LTmake_dataset_int - * - \ref H5LTmake_dataset_long - * - \ref H5LTmake_dataset_float - * - \ref H5LTmake_dataset_double - * - \ref H5LTmake_dataset_string + * - \ref H5LTmake_dataset + * - \ref H5LTmake_dataset_char + * - \ref H5LTmake_dataset_short + * - \ref H5LTmake_dataset_int + * - \ref H5LTmake_dataset_long + * - \ref H5LTmake_dataset_float + * - \ref H5LTmake_dataset_double + * - \ref H5LTmake_dataset_string * * - Read dataset functions - * - \ref H5LTread_dataset - * - \ref H5LTread_dataset_char - * - \ref H5LTread_dataset_short - * - \ref H5LTread_dataset_int - * - \ref H5LTread_dataset_long - * - \ref H5LTread_dataset_float - * - \ref H5LTread_dataset_double - * - \ref H5LTread_dataset_string + * - \ref H5LTread_dataset + * - \ref H5LTread_dataset_char + * - \ref H5LTread_dataset_short + * - \ref H5LTread_dataset_int + * - \ref H5LTread_dataset_long + * - \ref H5LTread_dataset_float + * - \ref H5LTread_dataset_double + * - \ref H5LTread_dataset_string * * - Query dataset functions - * - \ref H5LTfind_dataset - * - \ref H5LTget_dataset_ndims - * - \ref H5LTget_dataset_info + * - \ref H5LTfind_dataset + * - \ref H5LTget_dataset_ndims + * - \ref H5LTget_dataset_info * * - Dataset watch functions - * - \ref H5LDget_dset_dims - * - \ref H5LDget_dset_elmts - * - \ref H5LDget_dset_type_size + * - \ref H5LDget_dset_dims + * - \ref H5LDget_dset_elmts + * - \ref H5LDget_dset_type_size + * * + * * - Attribute Functions * - Set attribute functions - * - \ref H5LTset_attribute_string - * - \ref H5LTset_attribute_char - * - \ref H5LTset_attribute_uchar - * - \ref H5LTset_attribute_short - * - \ref H5LTset_attribute_ushort - * - \ref H5LTset_attribute_int - * - \ref H5LTset_attribute_uint - * - \ref H5LTset_attribute_long - * - \ref H5LTset_attribute_long_long - * - \ref H5LTset_attribute_ulong - * - \ref 
H5LTset_attribute_ullong - * - \ref H5LTset_attribute_float - * - \ref H5LTset_attribute_double - * - H5LTset_attribute_f (fortran ONLY) + * - \ref H5LTset_attribute_string + * - \ref H5LTset_attribute_char + * - \ref H5LTset_attribute_uchar + * - \ref H5LTset_attribute_short + * - \ref H5LTset_attribute_ushort + * - \ref H5LTset_attribute_int + * - \ref H5LTset_attribute_uint + * - \ref H5LTset_attribute_long + * - \ref H5LTset_attribute_long_long + * - \ref H5LTset_attribute_ulong + * - \ref H5LTset_attribute_ullong + * - \ref H5LTset_attribute_float + * - \ref H5LTset_attribute_double + * - H5LTset_attribute_f (fortran ONLY) * * - Get attribute functions - * - \ref H5LTget_attribute - * - \ref H5LTget_attribute_string - * - \ref H5LTget_attribute_char - * - \ref H5LTget_attribute_uchar - * - \ref H5LTget_attribute_short - * - \ref H5LTget_attribute_ushort - * - \ref H5LTget_attribute_int - * - \ref H5LTget_attribute_uint - * - \ref H5LTget_attribute_long - * - \ref H5LTget_attribute_long_long - * - \ref H5LTget_attribute_ulong - * - \ref H5LTget_attribute_ullong - * - \ref H5LTget_attribute_float - * - \ref H5LTget_attribute_double + * - \ref H5LTget_attribute + * - \ref H5LTget_attribute_string + * - \ref H5LTget_attribute_char + * - \ref H5LTget_attribute_uchar + * - \ref H5LTget_attribute_short + * - \ref H5LTget_attribute_ushort + * - \ref H5LTget_attribute_int + * - \ref H5LTget_attribute_uint + * - \ref H5LTget_attribute_long + * - \ref H5LTget_attribute_long_long + * - \ref H5LTget_attribute_ulong + * - \ref H5LTget_attribute_ullong + * - \ref H5LTget_attribute_float + * - \ref H5LTget_attribute_double * * - Query attribute functions - * - \ref H5LTfind_attribute - * - \ref H5LTget_attribute_info - * - \ref H5LTget_attribute_ndims + * - \ref H5LTfind_attribute + * - \ref H5LTget_attribute_info + * - \ref H5LTget_attribute_ndims + * * + * * - Datatype Functions * - Datatype translation functions - * - \ref H5LTtext_to_dtype - * - \ref H5LTdtype_to_text + * 
- \ref H5LTtext_to_dtype + * - \ref H5LTdtype_to_text * * - File image function * - Open file image function - * - \ref H5LTopen_file_image + * - \ref H5LTopen_file_image * * - Path and object function * - Query path and object function - * - \ref H5LTpath_valid + * - \ref H5LTpath_valid + * *
    * @@ -1510,7 +1520,7 @@ H5_HLDLL herr_t H5LTfind_attribute(hid_t loc_id, const char *name); * indicating the file’s root group, followed by the members * - A relative path with respect to \p loc_id * - A dot (\c .), if \p loc_id is the object identifier for - * the object itself + * the object itself. * * If \p path is an absolute path, then \p loc_id can be an * identifier for any object in the file as it is used only to diff --git a/hl/src/H5PTpublic.h b/hl/src/H5PTpublic.h index 04741ac..6552aa9 100644 --- a/hl/src/H5PTpublic.h +++ b/hl/src/H5PTpublic.h @@ -18,7 +18,11 @@ extern "C" { #endif -/**\defgroup H5PT Packet Table +/** \page H5PT_UG The HDF5 High Level Packet Table + * @todo Under Construction + */ + +/**\defgroup H5PT HDF5 Packet Table APIs (H5PT) * * Creating and manipulating HDF5 datasets to support append- * and read-only operations on table data (H5PT) @@ -210,7 +214,7 @@ H5_HLDLL herr_t H5PTclose(hid_t table_id); * Level 0 is faster but offers the least compression; * level 9 is slower but offers maximum compression. * A setting of -1 indicates that no compression is desired. - + * */ /* This function may be removed from the packet table in release 1.8.19. */ H5_HLDLL hid_t H5PTcreate_fl(hid_t loc_id, const char *dset_name, hid_t dtype_id, hsize_t chunk_size, diff --git a/hl/src/H5TBpublic.h b/hl/src/H5TBpublic.h index 9ad8e08..44b122c 100644 --- a/hl/src/H5TBpublic.h +++ b/hl/src/H5TBpublic.h @@ -18,7 +18,11 @@ extern "C" { #endif -/**\defgroup H5TB Table +/** \page H5TB_UG The HDF5 High Level Table + * @todo Under Construction + */ + +/**\defgroup H5TB HDF5 Table APIs (H5TB) * * Creating and manipulating HDF5 datasets intended to be * interpreted as tables (H5TB) @@ -656,12 +660,10 @@ H5_HLDLL herr_t H5TBAget_title(hid_t loc_id, char *table_title); * \return A return value of 0 indicates a fill value is not present. * \return A return value <0 indicates an error. 
* - * * \details H5TBget_fill() reads the table attribute fill values into * the buffer \p dst_buf for the table specified by \p dset_id * and \p dset_name located in \p loc_id. * - * * \par Example * \include H5TBAget_fill.c * diff --git a/java/examples/groups/H5Ex_G_Visit.java b/java/examples/groups/H5Ex_G_Visit.java index d14ded6..1f2f9a1 100644 --- a/java/examples/groups/H5Ex_G_Visit.java +++ b/java/examples/groups/H5Ex_G_Visit.java @@ -15,7 +15,7 @@ using H5Ovisit and H5Lvisit. The program prints all of the objects in the file specified in FILE, then prints all of the links in that file. The default file used by this - example implements the structure described in the User's + example implements the structure described in the User Guide, chapter 4, figure 26. ************************************************************/ package examples.groups; diff --git a/java/src/Makefile.am b/java/src/Makefile.am index 5bb72ad..0076932 100644 --- a/java/src/Makefile.am +++ b/java/src/Makefile.am @@ -108,7 +108,6 @@ hdf5_java_JAVA = \ ${pkgpath}/structs/H5AC_cache_config_t.java \ ${pkgpath}/H5.java \ ${pkgpath}/HDF5Constants.java \ - ${pkgpath}/HDF5GroupInfo.java \ ${pkgpath}/HDFArray.java \ ${pkgpath}/HDFNativeData.java @@ -124,7 +123,7 @@ DOCTITLE = '

    HDF5 Java Wrapper

    ' SRCDIR = '$(pkgpath)' docs: - $(JAVADOC) -sourcepath $(srcdir) -d javadoc -use -splitIndex -windowtitle $(WINDOWTITLE) -doctitle $(DOCTITLE) -J-Xmx180m -verbose -overview $(top_srcdir)/java/src/hdf/overview.html -classpath $(CLASSPATH_ENV) hdf.hdf5lib + $(JAVADOC) -sourcepath $(srcdir) -d javadoc -Xdoclint:none -use -splitIndex -windowtitle $(WINDOWTITLE) -doctitle $(DOCTITLE) -J-Xmx180m -verbose -overview $(top_srcdir)/java/src/hdf/overview.html -classpath $(CLASSPATH_ENV) hdf.hdf5lib CLEANFILES = classhdf5_java.stamp $(jarfile) $(JAVAROOT)/$(pkgpath)/callbacks/*.class $(JAVAROOT)/$(pkgpath)/exceptions/*.class $(JAVAROOT)/$(pkgpath)/structs/*.class $(JAVAROOT)/$(pkgpath)/*.class diff --git a/java/src/hdf/hdf5lib/CMakeLists.txt b/java/src/hdf/hdf5lib/CMakeLists.txt index 7c9cc4b..4fb0e0a 100644 --- a/java/src/hdf/hdf5lib/CMakeLists.txt +++ b/java/src/hdf/hdf5lib/CMakeLists.txt @@ -101,7 +101,6 @@ set (HDF5_JAVADOC_HDF_HDF5_STRUCTS_SOURCES set (HDF5_JAVA_HDF_HDF5_SOURCES HDFArray.java HDF5Constants.java - HDF5GroupInfo.java HDFNativeData.java H5.java ) diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java index 98589d9..f06163e 100644 --- a/java/src/hdf/hdf5lib/H5.java +++ b/java/src/hdf/hdf5lib/H5.java @@ -59,55 +59,55 @@ import hdf.hdf5lib.structs.H5O_native_info_t; import hdf.hdf5lib.structs.H5O_token_t; /** + * @page HDF5LIB HDF5 Java API Package * This class is the Java interface for the HDF5 library. *

    * This code is the called by Java programs to access the entry points of the HDF5 library. Each routine wraps * a single HDF5 entry point, generally with the arguments and return codes analogous to the C interface. *

    - * For details of the HDF5 library, see the HDF5 Documentation at: - * http://hdfgroup.org/HDF5/ + * For details of the HDF5 library, @see @ref RM *


    *

    * Mapping of arguments for Java * *

    * In general, arguments to the HDF Java API are straightforward translations from the 'C' API described in - * the HDF Reference Manual. + * the @ref RM. * * - * + * * - * + * * * * - * + * * * * - * + * * * * - * + * * * * - * + * * * * - * + * * * * - * + * * * * - * - * + * * *
    HDF-5 C types to Java types HDF5 C types to Java types
    HDF-5HDF5Java
    H5T_NATIVE_INT@ref H5T_NATIVE_INTint, Integer
    H5T_NATIVE_SHORT@ref H5T_NATIVE_SHORTshort, Short
    H5T_NATIVE_FLOAT@ref H5T_NATIVE_FLOATfloat, Float
    H5T_NATIVE_DOUBLE@ref H5T_NATIVE_DOUBLEdouble, Double
    H5T_NATIVE_CHAR@ref H5T_NATIVE_CHARbyte, Byte
    H5T_C_S1@ref H5T_C_S1java.lang.String
    void *
    + *
    void *
    * (i.e., pointer to `Any')
    Special -- see HDFArraySpecial -- see @ref HDFARRAY
    * General Rules for Passing Arguments and Results @@ -116,17 +116,17 @@ import hdf.hdf5lib.structs.H5O_token_t; * for arrays, which are discussed below. *

    * The return value of Java methods is also the analogous type, as above. A major exception to that - * rule is that all HDF functions that return SUCCEED/FAIL are declared boolean in the Java version, - * rather than int as in the C. Functions that return a value or else FAIL are declared the + * rule is that all HDF Java functions will raise an exception upon failure in the Java version, + * rather than just return int as in the C. Functions that return a value are declared * equivalent to the C function. * However, in most cases the Java method will raise an exception instead of returning an error code. - * See Errors and Exceptions below. + * @see @ref ERRORS. *

    * Java does not support pass by reference of arguments, so arguments that are returned through OUT * parameters must be wrapped in an object or array. The Java API for HDF consistently wraps arguments in - * arrays. + * arrays. Where possible the Java function may return the OUT parameter as an object or basic type. *

    - * For instance, a function that returns two integers is declared: + * For instance, a function that returns two integers declared as: * *

      *       h_err_t HDF5dummy( int *a1, int *a2)
    @@ -137,26 +137,34 @@ import hdf.hdf5lib.structs.H5O_token_t;
      * 
      * public synchronized static native int HDF5dummy(int args[]);
      * 
    + * OR + *
    + * public synchronized static native int[] HDF5dummy();
    + * 
    * * where a1 is args[0] and a2 is args[1], and would be invoked: * *
      * H5.HDF5dummy(a);
      * 
    + * OR + *
    + * a = H5.HDF5dummy();
    + * 
    * *

    * All the routines where this convention is used will have specific documentation of the details, given * below. *

    - * Arrays + * @ref HDFARRAY *

    * HDF5 needs to read and write multi-dimensional arrays of any number type (and records). The HDF5 API * describes the layout of the source and destination, and the data for the array passed as a block of * bytes, for instance, * - *

    - *      herr_t H5Dread(long fid, long filetype, long memtype, long memspace, void * data);
    - * 
    + * @code + * herr_t H5Dread(long fid, long filetype, long memtype, long memspace, void *data); + * @endcode * *

    * where ``void *'' means that the data may be any valid numeric type, and is a contiguous block of bytes that @@ -166,7 +174,7 @@ import hdf.hdf5lib.structs.H5O_token_t; * For Java, this ``ANY'' is a problem, as the type of data must always be declared. Furthermore, * multidimensional arrays are definitely not laid out contiguously in memory. It would be infeasible * to declare a separate routine for every combination of number type and dimensionality. For that reason, the - * HDFArray class is used to discover the type, shape, and + * @ref HDFARRAY HDFArray class is used to discover the type, shape, and * size of the data array at run time, and to convert to and from a contiguous array of bytes in synchronized * static native C order. *

    @@ -174,88 +182,107 @@ import hdf.hdf5lib.structs.H5O_token_t; * passed as an ``Object'', and the Java API will translate to and from the appropriate packed array of bytes * needed by the C library. So the function above would be declared: * - *

    - * public synchronized static native int H5Dread(long fid, long filetype, long memtype, long memspace,
    - * Object data);
    - * 
    - * OPEN_IDS.addElement(id); - + * @code + * public synchronized static int H5Dread(long dataset_id, long mem_type_id, long mem_space_id, + * long file_space_id, long xfer_plist_id, Object obj, + * boolean isCriticalPinning) + * throws HDF5Exception, HDF5LibraryException, NullPointerException; + * @endcode + * * and the parameter data can be any multi-dimensional array of numbers, such as float[][], or * int[][][], or Double[][]. *

    - * HDF-5 Constants + * @ref HDF5CONST *

    - * The HDF-5 API defines a set of constants and enumerated values. Most of these values are available to Java - * programs via the class HDF5Constants. For example, + * The HDF5 API defines a set of constants and enumerated values. Most of these values are available to Java + * programs via the class @ref HDF5CONST HDF5Constants. For example, * the parameters for the h5open() call include two numeric values, HDFConstants.H5F_ACC_RDWR * and HDF5Constants.H5P_DEFAULT. * As would be expected, these numbers correspond to the C constants - * H5F_ACC_RDWR and H5P_DEFAULT. + * #H5F_ACC_RDWR and #H5P_DEFAULT. *

    - * The HDF-5 API defines a set of values that describe number types and sizes, such as "H5T_NATIVE_INT" and - * "hsize_t". These values are determined at run time by the HDF-5 C library. To support these parameters, - * the Java class HDF5CDataTypes looks up the values + * The HDF5 API defines a set of values that describe number types and sizes, such as "H5T_NATIVE_INT" and + * "hsize_t". These values are determined at run time by the HDF5 C library. To support these parameters, + * the Java HDFConstants class looks up the values * when initiated. The values can be accessed as public variables of the Java class, such as: * - *

    - * long data_type = HDF5CDataTypes.JH5T_NATIVE_INT;
    - * 
    + * @code + * long data_type = HDFConstants.H5T_NATIVE_INT; + * @endcode * * The Java application uses both types of constants the same way, the only difference is that the - * HDF5CDataTypes may have different values on different platforms. + * HDFConstants may have different values on different platforms. *

    - * Error handling and Exceptions + * @ref ERRORS *

    - * The HDF5 error API (H5E) manages the behavior of the error stack in the HDF-5 library. This API is omitted - * from the JHI5. Errors are converted into Java exceptions. This is totally different from the C interface, - * but is very natural for Java programming. - *

    - * The exceptions of the JHI5 are organized as sub-classes of the class - * HDF5Exception. There are two subclasses - * of - * HDF5Exception, - HDF5LibraryException - * and HDF5JavaException. The - * sub-classes of the former represent errors from the HDF-5 C library, while sub-classes of the latter + * The HDF5 error API (@ref H5E) manages the behavior of the error stack in the HDF5 library. This API is + * omitted from the JHI5. Errors are converted into Java exceptions. This is totally different from the C + * interface, but is very natural for Java programming.

    The exceptions of the JHI5 are organized as + * sub-classes of the class + * @ref ERRORS HDF5Exception. There are two subclasses of + * HDF5Exception, @ref ERRORSLIB HDF5LibraryException + * and @ref ERRORSJAVA HDF5JavaException. The + * sub-classes of the former represent errors from the HDF5 C library, while sub-classes of the latter * represent errors in the JHI5 wrapper and support code. *

    * The super-class HDF5LibraryException implements the method 'printStackTrace()', - * which prints out the HDF-5 error stack, as described in the HDF-5 C API H5Eprint(). This may - * be used by Java exception handlers to print out the HDF-5 error stack. - *


    + * which prints out the HDF5 error stack, as described in the HDF5 C API @ref H5Eprint(). This + * may be used by Java exception handlers to print out the HDF5 error stack.
    * * @version HDF5 1.13.3
    - * See also: hdf.hdf5lib.HDFArray
    - * hdf.hdf5lib.HDF5Constants
    - * hdf.hdf5lib.HDF5CDataTypes
    - * hdf.hdf5lib.HDF5Exception
    - * http://hdfgroup.org/HDF5" + * See also: + * @ref HDFARRAY hdf.hdf5lib.HDFArray
    + * @ref HDF5CONST hdf.hdf5lib.HDF5Constants
    + * @ref ERRORS hdf.hdf5lib.HDF5Exception
    + * HDF5 + * + * For details of the HDF5 library, @see @ref RM + */ + +/** + * This class is the Java interface for the HDF5 library. + * + * @defgroup JH5 HDF5 Library Java Interface + * + * This code is the called by Java programs to access the entry points of the HDF5 library. Each routine wraps + * a single HDF5 entry point, generally with the arguments and return codes analogous to the C interface. + * + * @see H5, C-API + * + * @see @ref H5_UG, User Guide * */ public class H5 implements java.io.Serializable { /** - * + * Serialization ID */ private static final long serialVersionUID = 6129888282117053288L; private final static org.slf4j.Logger log = org.slf4j.LoggerFactory.getLogger(H5.class); /** - * The version number of the HDF5 library: - * LIB_VERSION[0]: The major version of the library. - * LIB_VERSION[1]: The minor version of the library. - * LIB_VERSION[2]: The release number of the library. + * @ingroup JH5 * + * The version number of the HDF5 library: + *
      + *
    • LIB_VERSION[0]: The major version of the library.
    • + *
    • LIB_VERSION[1]: The minor version of the library.
    • + *
    • LIB_VERSION[2]: The release number of the library.
    • + *
    * Make sure to update the versions number when a different library is used. */ - public final static int LIB_VERSION[] = {1, 13, 2}; + public final static int LIB_VERSION[] = {1, 13, 3}; /** + * @ingroup JH5 + * * add system property to load library by path */ public final static String H5PATH_PROPERTY_KEY = "hdf.hdf5lib.H5.hdf5lib"; /** + * @ingroup JH5 + * * add system property to load library by name from library path, via System.loadLibrary() */ public final static String H5_LIBRARY_NAME_PROPERTY_KEY = "hdf.hdf5lib.H5.loadLibraryName"; @@ -268,6 +295,8 @@ public class H5 implements java.io.Serializable { static { loadH5Lib(); } /** + * @ingroup JH5 + * * load native library */ public static void loadH5Lib() @@ -372,6 +401,8 @@ public class H5 implements java.io.Serializable { // //////////////////////////////////////////////////////////// /** + * @ingroup JH5 + * * Get number of open IDs. * * @return Returns a count of open IDs @@ -379,6 +410,8 @@ public class H5 implements java.io.Serializable { public final static int getOpenIDCount() { return OPEN_IDS.size(); } /** + * @ingroup JH5 + * * Get the open IDs * * @return Returns a collection of open IDs @@ -386,6 +419,8 @@ public class H5 implements java.io.Serializable { public final static Collection getOpenIDs() { return OPEN_IDS; } /** + * @ingroup JH5 + * * H5check_version verifies that the arguments match the version numbers compiled into the library. * * @param majnum @@ -397,47 +432,55 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful. 
Upon failure (when the versions do not match), this * function causes the application to abort (i.e., crash) * - * See C API function: herr_t H5check_version() + * See C API function: @ref herr_t H5check_version(unsigned majnum, unsigned minnum, unsigned relnum) **/ public synchronized static native int H5check_version(int majnum, int minnum, int relnum); /** + * @ingroup JH5 + * * H5close flushes all data to disk, closes all file identifiers, and cleans up all memory used by the * library. * * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5close() throws HDF5LibraryException; /** + * @ingroup JH5 + * * H5open initialize the library. * * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5open() throws HDF5LibraryException; /** + * @ingroup JH5 + * * H5dont_atexit indicates to the library that an atexit() cleanup routine should not be installed. In * order to be effective, this routine must be called before any other HDF function calls, and must be * called each time the library is loaded/linked into the application (the first time and after it's been - * unloaded).

    This is called by the static initializer, so this should never need to be explicitly + * unloaded).

    This is called by the static initializer, so this should never need to be explicitly * called by a Java program. * * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ private synchronized static native int H5dont_atexit() throws HDF5LibraryException; /** - * Turn off error handling. By default, the C library prints the error stack of the HDF-5 C library on + * @ingroup JH5 + * + * Turn off error handling. By default, the C library prints the error stack of the HDF5 C library on * stdout. This behavior may be disabled by calling H5error_off(). * * @return a non-negative value if successful @@ -445,29 +488,35 @@ public class H5 implements java.io.Serializable { public synchronized static native int H5error_off(); /** - * Turn on error handling. By default, the C library prints the error stack of the HDF-5 C library on + * @ingroup JH5 + * + * Turn on error handling. By default, the C library prints the error stack of the HDF5 C library on * stdout. This behavior may be re-enabled by calling H5error_on(). */ public synchronized static native void H5error_on(); /** + * @ingroup JH5 + * * H5garbage_collect collects on all free-lists of all types. * * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5garbage_collect() throws HDF5LibraryException; /** + * @ingroup JH5 + * * H5get_libversion retrieves the major, minor, and release numbers of the version of the HDF library * which is linked to the application. * * @param libversion * The version information of the HDF library. * - *

    +     * 
          *      libversion[0] = The major version of the library.
          *      libversion[1] = The minor version of the library.
          *      libversion[2] = The release number of the library.
    @@ -475,11 +524,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful, along with the version information.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5get_libversion(int[] libversion) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5
    +     *
          * H5set_free_list_limits
          *      Sets limits on the different kinds of free lists.  Setting a value
          *      of -1 for a limit means no limit of that type.  These limits are global
    @@ -506,7 +557,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful, along with the version information.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          */
         public synchronized static native int H5set_free_list_limits(int reg_global_lim, int reg_list_lim,
                                                                      int arr_global_lim, int arr_list_lim,
    @@ -514,6 +565,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5
    +     *
          * H5export_dataset is a utility function to save data in a file.
          *
          * @param file_export_name
    @@ -529,13 +582,15 @@ public class H5 implements java.io.Serializable {
          *            3 - export data as binary Big Endian.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native void H5export_dataset(String file_export_name, long file_id,
                                                                 String object_path, int binary_order)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5
    +     *
          * H5export_attribute is a utility function to save data in a file.
          *
          * @param file_export_name
    @@ -551,13 +606,15 @@ public class H5 implements java.io.Serializable {
          *            3 - export data as binary Big Endian.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native void H5export_attribute(String file_export_name, long dataset_id,
                                                                   String attribute_name, int binary_order)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5
    +     *
          * H5is_library_threadsafe Checks to see if the library was built with thread-safety enabled.
          *
          * @return true if hdf5 library implements threadsafe
    @@ -572,11 +629,23 @@ public class H5 implements java.io.Serializable {
     
         // ////////////////////////////////////////////////////////////
         // //
    -    // H5A: HDF5 1.8 Attribute Interface API Functions //
    +    // H5A: HDF5 Attribute Interface API Functions //
         // //
         // ////////////////////////////////////////////////////////////
    +    /**
    +     * @defgroup JH5A Java Attribute (H5A) Interface
    +     *
    +     * An HDF5 attribute is a small metadata object describing the nature and/or intended usage of a primary
     +     * data object. A primary data object may be a dataset, group, or committed datatype.
    +     *
    +     * @see H5A, C-API
    +     *
    +     * @see @ref H5A_UG, User Guide
    +     **/
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aclose terminates access to the attribute specified by its identifier, attr_id.
          *
          * @param attr_id
    @@ -585,7 +654,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static int H5Aclose(long attr_id) throws HDF5LibraryException
         {
    @@ -601,6 +670,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native int _H5Aclose(long attr_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Acopy copies the content of one attribute to another.
          *
          * @param src_aid
    @@ -611,11 +682,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          */
         public synchronized static native int H5Acopy(long src_aid, long dst_aid) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Acreate creates an attribute, attr_name, which is attached to the object specified by the identifier
          * loc_id.
          *
    @@ -635,7 +708,7 @@ public class H5 implements java.io.Serializable {
          * @return An attribute identifier if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            Name is null.
          **/
    @@ -652,6 +725,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Acreate2 an attribute, attr_name, which is attached to the object specified by the identifier loc_id.
          *
          * @see public static long H5Acreate( long loc_id, String attr_name, long type_id, long space_id, long
    @@ -662,6 +737,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Acreate_by_name creates an attribute, attr_name, which is attached to the object specified by loc_id
          * and obj_name.
          *
    @@ -685,7 +762,7 @@ public class H5 implements java.io.Serializable {
          * @return An attribute identifier if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -709,6 +786,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Adelete removes the attribute specified by its name, name, from a dataset, group, or named datatype.
          *
          * @param loc_id
    @@ -719,7 +798,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -727,6 +806,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Adelete_by_idx removes an attribute, specified by its location in an index, from an object.
          *
          * @param loc_id
    @@ -743,7 +824,7 @@ public class H5 implements java.io.Serializable {
          *            IN: Link access property list identifier
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            obj_name is null.
          **/
    @@ -752,6 +833,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Adelete_by_name removes the attribute attr_name from an object specified by location and name, loc_id
          * and obj_name, respectively.
          *
    @@ -767,7 +850,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -776,6 +859,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aexists determines whether the attribute attr_name exists on the object specified by obj_id.
          *
          * @param obj_id
    @@ -786,7 +871,7 @@ public class H5 implements java.io.Serializable {
          * @return boolean true if an attribute with a given name exists.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            attr_name is null.
          **/
    @@ -794,6 +879,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aexists_by_name determines whether the attribute attr_name exists on an object. That object is
          * specified by its location and name, loc_id and obj_name, respectively.
          *
    @@ -809,7 +896,7 @@ public class H5 implements java.io.Serializable {
          * @return boolean true if an attribute with a given name exists, otherwise returns false.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -818,6 +905,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_info retrieves attribute information, by attribute identifier.
          *
          * @param attr_id
    @@ -826,11 +915,13 @@ public class H5 implements java.io.Serializable {
          * @return A buffer(H5A_info_t) for Attribute information
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native H5A_info_t H5Aget_info(long attr_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_info_by_idx Retrieves attribute information, by attribute index position.
          *
          * @param loc_id
    @@ -849,7 +940,7 @@ public class H5 implements java.io.Serializable {
          * @return A buffer(H5A_info_t) for Attribute information
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            obj_name is null.
          **/
    @@ -859,6 +950,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_info_by_name Retrieves attribute information, by attribute name.
          *
          * @param loc_id
    @@ -873,7 +966,7 @@ public class H5 implements java.io.Serializable {
          * @return A buffer(H5A_info_t) for Attribute information
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            obj_name is null.
          **/
    @@ -882,6 +975,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_name retrieves the name of an attribute specified by the identifier, attr_id.
          *
          * @param attr_id
    @@ -890,11 +985,13 @@ public class H5 implements java.io.Serializable {
          * @return String for Attribute name.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native String H5Aget_name(long attr_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_name_by_idx retrieves the name of an attribute that is attached to an object, which is specified
          * by its location and name, loc_id and obj_name, respectively.
          *
    @@ -923,6 +1020,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_space retrieves a copy of the dataspace for an attribute.
          *
          * @param attr_id
    @@ -931,7 +1030,7 @@ public class H5 implements java.io.Serializable {
          * @return attribute dataspace identifier if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Aget_space(long attr_id) throws HDF5LibraryException
         {
    @@ -947,6 +1046,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native long _H5Aget_space(long attr_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_storage_size returns the amount of storage that is required for the specified attribute,
          * attr_id.
          *
    @@ -956,11 +1057,13 @@ public class H5 implements java.io.Serializable {
          * @return the amount of storage size allocated for the attribute; otherwise returns 0 (zero)
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native long H5Aget_storage_size(long attr_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_type retrieves a copy of the datatype for an attribute.
          *
          * @param attr_id
    @@ -969,7 +1072,7 @@ public class H5 implements java.io.Serializable {
          * @return a datatype identifier if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Aget_type(long attr_id) throws HDF5LibraryException
         {
    @@ -985,6 +1088,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native long _H5Aget_type(long attr_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aopen opens an existing attribute, attr_name, that is attached to an object specified an object
          * identifier, object_id.
          *
    @@ -998,7 +1103,7 @@ public class H5 implements java.io.Serializable {
          * @return An attribute identifier if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            Name is null.
          **/
    @@ -1018,6 +1123,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aopen_by_idx opens an existing attribute that is attached to an object specified by location and
          * name, loc_id and obj_name, respectively
          *
    @@ -1039,7 +1146,7 @@ public class H5 implements java.io.Serializable {
          * @return An attribute identifier if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            Name is null.
          **/
    @@ -1061,6 +1168,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aopen_by_name Opens an attribute for an object by object name and attribute name
          *
          * @param loc_id
    @@ -1077,7 +1186,7 @@ public class H5 implements java.io.Serializable {
          * @return Returns an attribute identifier if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            obj_name is null.
          **/
    @@ -1098,6 +1207,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer from the file.
          *
    @@ -1113,7 +1224,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1122,6 +1233,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer from the file.
          *
    @@ -1135,7 +1248,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1146,6 +1259,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer from the file.
          *
    @@ -1159,7 +1274,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1170,6 +1285,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into data object from the file.
          *
    @@ -1187,7 +1304,7 @@ public class H5 implements java.io.Serializable {
          * @exception HDF5Exception
          *            Failure in the data conversion.
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null. See public synchronized static native int H5Aread( )
          **/
    @@ -1269,6 +1386,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of double from the file.
          *
    @@ -1284,7 +1403,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1293,6 +1412,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of double from the file.
          *
    @@ -1306,7 +1427,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1332,7 +1453,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1341,6 +1462,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of float from the file.
          *
    @@ -1354,7 +1477,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1365,6 +1488,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of int from the file.
          *
    @@ -1380,7 +1505,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1389,6 +1514,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of int from the file.
          *
    @@ -1402,7 +1529,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1413,6 +1540,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of long from the file.
          *
    @@ -1428,7 +1557,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1437,6 +1566,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of long from the file.
          *
    @@ -1450,7 +1581,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1461,6 +1592,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of String from the file.
          *
    @@ -1474,7 +1607,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1482,6 +1615,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of short from the file.
          *
    @@ -1497,7 +1632,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1506,6 +1641,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer  of shortfrom the file.
          *
    @@ -1519,7 +1656,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1530,6 +1667,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of variable-lenght from the file.
          *
    @@ -1543,7 +1682,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1551,6 +1690,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of String from the file.
          *
    @@ -1564,7 +1705,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1572,6 +1713,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of variable-lenght strings from the file.
          *
    @@ -1585,7 +1728,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *             data buffer is null.
          **/
    @@ -1593,6 +1736,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aread reads an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is read into buffer of string from the file.
          *
    @@ -1606,7 +1751,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -1614,8 +1759,10 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Arename changes the name of attribute that is attached to the object specified by loc_id. The
    -     *attribute named old_attr_name is renamed new_attr_name.
    +     * attribute named old_attr_name is renamed new_attr_name.
          *
          * @param loc_id
          *            IN: Location or object identifier; may be dataset or group
    @@ -1627,7 +1774,7 @@ public class H5 implements java.io.Serializable {
          * @return A non-negative value if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            Name is null.
          **/
    @@ -1635,6 +1782,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Arename_by_name changes the name of attribute that is attached to the object specified by loc_id and
          * obj_name. The attribute named old_attr_name is renamed new_attr_name.
          *
    @@ -1652,7 +1801,7 @@ public class H5 implements java.io.Serializable {
          * @return A non-negative value if successful; otherwise returns a negative value.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            Name is null.
          **/
    @@ -1661,6 +1810,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buf to the file.
          *
    @@ -1676,7 +1827,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1685,6 +1836,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buf to the file.
          *
    @@ -1698,7 +1851,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1709,6 +1862,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buf to the file.
          *
    @@ -1722,7 +1877,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1733,6 +1888,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from data object to the file.
          *
    @@ -1750,7 +1907,7 @@ public class H5 implements java.io.Serializable {
          * @exception HDF5Exception
          *            Failure in the data conversion.
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data object is null
          **/
    @@ -1809,6 +1966,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of double to the file.
          *
    @@ -1824,7 +1983,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *             Error from the HDF-5 Library.
    +     *             Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1833,6 +1992,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of double to the file.
          *
    @@ -1846,7 +2007,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1857,6 +2018,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of float to the file.
          *
    @@ -1872,7 +2035,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1881,6 +2044,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of float to the file.
          *
    @@ -1894,7 +2059,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1905,6 +2070,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of int to the file.
          *
    @@ -1920,7 +2087,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1929,6 +2096,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of int to the file.
          *
    @@ -1942,7 +2111,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1953,6 +2122,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of long to the file.
          *
    @@ -1968,7 +2139,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -1977,6 +2148,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of long to the file.
          *
    @@ -1990,7 +2163,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -2001,6 +2174,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of short to the file.
          *
    @@ -2016,7 +2191,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -2025,6 +2200,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of short to the file.
          *
    @@ -2038,7 +2215,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -2049,6 +2226,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of string to the file.
          *
    @@ -2062,7 +2241,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -2070,6 +2249,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite writes an attribute, specified with attr_id. The attribute's memory datatype is specified with
          * mem_type_id. The entire attribute is written from buffer of variable-lenght to the file.
          *
    @@ -2083,7 +2264,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data is null.
          **/
    @@ -2091,6 +2272,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Awrite_VLStrings writes a variable length String dataset, specified by its identifier attr_id, from
          * the application memory buffer buffer of variable-lenght strings into the file.
          *
    @@ -2106,7 +2289,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -2115,6 +2298,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aget_create_plist retrieves a copy of the attribute creation property list identifier.
          *
          * @param attr_id
    @@ -2123,7 +2308,7 @@ public class H5 implements java.io.Serializable {
          * @return identifier for the attribute's creation property list if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Aget_create_plist(long attr_id) throws HDF5LibraryException
         {
    @@ -2139,6 +2324,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native long _H5Aget_create_plist(long attr_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aiterate2 iterates over the attributes attached to a dataset, named datatype, or group, as
          * specified by obj_id. For each attribute, user-provided data, op_data, with additional information
          * as defined below, is passed to a user-defined function, op, which operates on that attribute.
    @@ -2171,7 +2358,7 @@ public class H5 implements java.io.Serializable {
          *            members were processed with no operator returning non-zero.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            buf is null.
          **/
    @@ -2180,6 +2367,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5A
    +     *
          * H5Aiterate_by_name iterates over the attributes attached to the dataset or group specified with loc_id
          * and obj_name. For each attribute, user-provided data, op_data, with additional information as defined
          * below, is passed to a user-defined function, op, which operates on that attribute.
    @@ -2216,7 +2405,7 @@ public class H5 implements java.io.Serializable {
          *            members were processed with no operator returning non-zero.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            buf is null.
          **/
    @@ -2262,8 +2451,17 @@ public class H5 implements java.io.Serializable {
         // H5D: Datasets Interface Functions //
         // //
         // ////////////////////////////////////////////////////////////
    +    /**
    +     * @defgroup JH5D Java Datasets (H5D) Interface
    +     *
    +     * @see H5D, C-API
    +     *
    +     * @see @ref H5D_UG, User Guide
    +     **/
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dcopy copies the content of one dataset to another dataset.
          *
          * @param src_did
    @@ -2274,11 +2472,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          */
         public synchronized static native int H5Dcopy(long src_did, long dst_did) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dclose ends access to a dataset specified by dataset_id and releases resources used by it.
          *
          * @param dataset_id
    @@ -2287,7 +2487,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static int H5Dclose(long dataset_id) throws HDF5LibraryException
         {
    @@ -2303,6 +2503,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native int _H5Dclose(long dataset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dcreate creates a new dataset named name at the location specified by loc_id.
          *
          * @param loc_id
    @@ -2323,7 +2525,7 @@ public class H5 implements java.io.Serializable {
          * @return a dataset identifier
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -2340,6 +2542,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dcreate2 creates a new dataset named name at the location specified by loc_id.
          *
          * @see public static int H5Dcreate(int loc_id, String name, int type_id, int space_id, int lcpl_id, int
    @@ -2350,6 +2554,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dcreate_anon creates a dataset in the file specified by loc_id.
          *
          * @param loc_id
    @@ -2366,7 +2572,7 @@ public class H5 implements java.io.Serializable {
          * @return a dataset identifier
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Dcreate_anon(long loc_id, long type_id, long space_id, long dcpl_id, long dapl_id)
             throws HDF5LibraryException
    @@ -2385,6 +2591,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dfill explicitly fills the dataspace selection in memory, space_id, with the fill value specified in
          * fill.
          *
    @@ -2400,7 +2608,7 @@ public class H5 implements java.io.Serializable {
          *            IN: Dataspace describing memory buffer and containing the selection to be filled.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            buf is null.
          **/
    @@ -2409,6 +2617,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dget_access_plist returns an identifier for a copy of the dataset access property list for a dataset.
          *
          * @param dset_id
    @@ -2417,11 +2627,13 @@ public class H5 implements java.io.Serializable {
          * @return a dataset access property list identifier
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native long H5Dget_access_plist(long dset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dget_create_plist returns an identifier for a copy of the dataset creation property list for a
          * dataset.
          *
    @@ -2430,7 +2642,7 @@ public class H5 implements java.io.Serializable {
          * @return a dataset creation property list identifier if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Dget_create_plist(long dataset_id) throws HDF5LibraryException
         {
    @@ -2446,6 +2658,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native long _H5Dget_create_plist(long dataset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dget_offset returns the address in the file of the dataset dset_id.
          *
          * @param dset_id
    @@ -2454,11 +2668,13 @@ public class H5 implements java.io.Serializable {
          * @return the offset in bytes.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native long H5Dget_offset(long dset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dget_space returns an identifier for a copy of the dataspace for a dataset.
          *
          * @param dataset_id
    @@ -2467,7 +2683,7 @@ public class H5 implements java.io.Serializable {
          * @return a dataspace identifier if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Dget_space(long dataset_id) throws HDF5LibraryException
         {
    @@ -2483,6 +2699,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native long _H5Dget_space(long dataset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dget_space_status determines whether space has been allocated for the dataset dset_id.
          *
          * @param dset_id
    @@ -2491,11 +2709,13 @@ public class H5 implements java.io.Serializable {
          * @return the space allocation status
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Dget_space_status(long dset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dget_storage_size returns the amount of storage that is required for the dataset.
          *
          * @param dataset_id
    @@ -2504,12 +2724,14 @@ public class H5 implements java.io.Serializable {
          * @return he amount of storage space allocated for the dataset.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native long H5Dget_storage_size(long dataset_id)
             throws HDF5LibraryException, IllegalArgumentException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dget_type returns an identifier for a copy of the datatype for a dataset.
          *
          * @param dataset_id
    @@ -2518,7 +2740,7 @@ public class H5 implements java.io.Serializable {
          * @return a datatype identifier if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Dget_type(long dataset_id) throws HDF5LibraryException
         {
    @@ -2534,6 +2756,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native long _H5Dget_type(long dataset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Diterate iterates over all the data elements in the memory buffer buf, executing the callback
          * function operator once for each such data element.
          *
    @@ -2552,7 +2776,7 @@ public class H5 implements java.io.Serializable {
          *            members were processed with no operator returning non-zero.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            buf is null.
          **/
    @@ -2561,6 +2785,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dopen opens the existing dataset specified by a location identifier and name, loc_id and name,
          * respectively.
          *
    @@ -2574,7 +2800,7 @@ public class H5 implements java.io.Serializable {
          * @return a dataset identifier if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -2591,6 +2817,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dopen2 opens the existing dataset specified by a location identifier and name, loc_id and name,
          * respectively.
          *
    @@ -2600,6 +2828,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer buf.
          *
    @@ -2621,7 +2851,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2631,6 +2861,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer buf.
          *
    @@ -2650,7 +2882,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2662,6 +2894,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer buf.
          *
    @@ -2681,7 +2915,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2693,6 +2927,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application data object.
          *
    @@ -2716,7 +2952,7 @@ public class H5 implements java.io.Serializable {
          * @exception HDF5Exception
          *            Failure in the data conversion.
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data object is null.
          **/
    @@ -2808,6 +3044,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of type double.
          *
    @@ -2829,7 +3067,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2839,6 +3077,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of type double.
          *
    @@ -2858,7 +3098,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2870,6 +3110,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of float.
          *
    @@ -2891,7 +3133,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2901,6 +3143,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of float.
          *
    @@ -2920,7 +3164,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2932,6 +3176,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of int.
          *
    @@ -2953,7 +3199,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2963,6 +3209,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of int.
          *
    @@ -2982,7 +3230,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -2994,6 +3242,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of long.
          *
    @@ -3015,7 +3265,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3025,6 +3275,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of long.
          *
    @@ -3044,7 +3296,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3056,6 +3308,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of string.
          *
    @@ -3075,7 +3329,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3085,6 +3339,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of short.
          *
    @@ -3106,7 +3362,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3116,6 +3372,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of short.
          *
    @@ -3135,7 +3393,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3147,6 +3405,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of variable-lenght.
          *
    @@ -3166,7 +3426,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3175,6 +3435,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of string.
          *
    @@ -3194,7 +3456,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3203,6 +3465,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dread reads a (partial) dataset, specified by its identifier dataset_id, from the file into the
          * application memory buffer of variable-lenght strings.
          *
    @@ -3222,7 +3486,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data buffer is null.
          **/
    @@ -3232,6 +3496,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dset_extent sets the current dimensions of the chunked dataset dset_id to the sizes specified in
          * size.
          *
    @@ -3241,7 +3507,7 @@ public class H5 implements java.io.Serializable {
          *            IN: Array containing the new magnitude of each dimension of the dataset.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            size is null.
          **/
    @@ -3249,6 +3515,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dvlen_get_buf_size determines the number of bytes required to store the VL data from the dataset,
          * using the space_id for the selection in the dataset on disk and the type_id for the memory
          * representation of the VL data in memory.
    @@ -3263,7 +3531,7 @@ public class H5 implements java.io.Serializable {
          * @return the size in bytes of the memory buffer required to store the VL data.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            buf is null.
          **/
    @@ -3271,6 +3539,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dvlen_reclaim reclaims buffer used for VL data.
          *
          * @param type_id
    @@ -3285,7 +3555,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            buf is null.
          *
    @@ -3297,6 +3567,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3318,7 +3590,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3328,6 +3600,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3347,7 +3621,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3359,6 +3633,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3378,7 +3654,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3390,6 +3666,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory data object into the file.
          *
    @@ -3413,7 +3691,7 @@ public class H5 implements java.io.Serializable {
          * @exception HDF5Exception
          *            Failure in the data conversion.
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            data object is null.
          **/
    @@ -3485,6 +3763,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3506,7 +3786,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3516,6 +3796,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3535,7 +3817,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3548,6 +3830,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3569,7 +3853,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3579,6 +3863,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3598,7 +3884,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3610,6 +3896,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3631,7 +3919,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3641,6 +3929,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3660,7 +3950,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3672,6 +3962,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3693,7 +3985,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3703,6 +3995,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3722,7 +4016,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3734,6 +4028,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3755,7 +4051,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3765,6 +4061,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3784,7 +4082,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3796,6 +4094,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3815,7 +4115,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3825,6 +4125,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite writes a (partial) dataset, specified by its identifier dataset_id, from the application
          * memory buffer into the file.
          *
    @@ -3844,7 +4146,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3853,6 +4155,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dwrite_VLStrings writes a (partial) variable length String dataset, specified by its identifier
          * dataset_id, from the application memory buffer buf into the file.
          *
    @@ -3874,7 +4178,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -3885,6 +4189,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Dflush causes all buffers associated with a dataset to be immediately flushed to disk without
          * removing the data from the cache.
          *
    @@ -3892,11 +4198,13 @@ public class H5 implements java.io.Serializable {
          *            IN: Identifier of the dataset to be flushed.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native void H5Dflush(long dset_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5D
    +     *
          * H5Drefresh causes all buffers associated with a dataset to be cleared and immediately re-loaded with
          * updated contents from disk. This function essentially closes the dataset, evicts all metadata
          * associated with it from the cache, and then re-opens the dataset. The reopened dataset is automatically
    @@ -3906,7 +4214,7 @@ public class H5 implements java.io.Serializable {
          *            IN: Identifier of the dataset to be refreshed.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native void H5Drefresh(long dset_id) throws HDF5LibraryException;
     
    @@ -3926,8 +4234,18 @@ public class H5 implements java.io.Serializable {
         // H5E: Error Stack //
         // //
         // ////////////////////////////////////////////////////////////
    +    /**
    +     *
    +     * @defgroup JH5E Java Error (H5E) Interface
    +     *
    +     * @see H5E, C-API
    +     *
    +     * @see @ref H5E_UG, User Guide
    +     */
     
         /**
    +     * @ingroup JH5E
    +     *
          * H5Eauto_is_v2 determines whether the error auto reporting function for an error stack conforms to the
          * H5E_auto2_t typedef or the H5E_auto1_t typedef.
          *
    @@ -3938,19 +4256,21 @@ public class H5 implements java.io.Serializable {
          *            H5E_auto1_t.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native boolean H5Eauto_is_v2(long stack_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5E
    +     *
          * H5Eclear clears the error stack for the current thread. H5Eclear can fail if there are problems
          * initializing the library. 

    This may be used by exception handlers to assure that the error condition - * in the HDF-5 library has been reset. + * in the HDF5 library has been reset. * * @return Returns a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static int H5Eclear() throws HDF5LibraryException { @@ -3959,6 +4279,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5E + * * H5Eclear clears the error stack specified by estack_id, or, if estack_id is set to H5E_DEFAULT, the * error stack for the current thread. * @@ -3966,11 +4288,13 @@ public class H5 implements java.io.Serializable { * IN: Error stack identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static void H5Eclear(long stack_id) throws HDF5LibraryException { H5Eclear2(stack_id); } /** + * @ingroup JH5E + * * H5Eclear2 clears the error stack specified by estack_id, or, if estack_id is set to H5E_DEFAULT, the * error stack for the current thread. * @@ -3978,33 +4302,39 @@ public class H5 implements java.io.Serializable { * IN: Error stack identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Eclear2(long stack_id) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Eclose_msg closes an error message identifier, which can be either a major or minor message. * * @param err_id * IN: Error message identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Eclose_msg(long err_id) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Eclose_stack closes the object handle for an error stack and releases its resources. * * @param stack_id * IN: Error stack identifier. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Eclose_stack(long stack_id) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Ecreate_msg adds an error message to an error class defined by client library or application program. * * @param cls_id @@ -4017,7 +4347,7 @@ public class H5 implements java.io.Serializable { * @return a message identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * msg is null. **/ @@ -4025,16 +4355,20 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5E + * * H5Ecreate_stack creates a new empty error stack and returns the new stack's identifier. * * @return an error stack identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Ecreate_stack() throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Eget_class_name retrieves the name of the error class specified by the class identifier. * * @param class_id @@ -4043,23 +4377,27 @@ public class H5 implements java.io.Serializable { * @return the name of the error class * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native String H5Eget_class_name(long class_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5E + * * H5Eget_current_stack copies the current error stack and returns an error stack identifier for the new * copy. * * @return an error stack identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native long H5Eget_current_stack() throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Eset_current_stack replaces the content of the current error stack with a copy of the content of the * error stack specified by estack_id. * @@ -4067,11 +4405,13 @@ public class H5 implements java.io.Serializable { * IN: Error stack identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Eset_current_stack(long stack_id) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Eget_msg retrieves the error message including its length and type. * * @param msg_id @@ -4082,12 +4422,14 @@ public class H5 implements java.io.Serializable { * @return the error message * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native String H5Eget_msg(long msg_id, int[] type_list) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Eget_num retrieves the number of error records in the error stack specified by estack_id (including * major, minor messages and description). * @@ -4097,12 +4439,14 @@ public class H5 implements java.io.Serializable { * @return the number of error messages * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Eget_num(long stack_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5E + * * H5Eprint2 prints the error stack specified by estack_id on the specified stream, stream. * * @param stack_id @@ -4112,12 +4456,14 @@ public class H5 implements java.io.Serializable { * IN: File pointer, or stderr if null. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native void H5Eprint2(long stack_id, Object stream) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Epop deletes the number of error records specified in count from the top of the error stack specified * by estack_id (including major, minor messages and description). * @@ -4127,11 +4473,13 @@ public class H5 implements java.io.Serializable { * IN: Version of the client library or application to which the error class belongs. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Epop(long stack_id, long count) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Epush pushes a new error record onto the error stack specified by estack_id. * * @param stack_id @@ -4152,7 +4500,7 @@ public class H5 implements java.io.Serializable { * IN: Error description string. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * file, func, or msg is null. **/ @@ -4162,6 +4510,8 @@ public class H5 implements java.io.Serializable { H5Epush2(stack_id, file, func, line, cls_id, maj_id, min_id, msg); } /** + * @ingroup JH5E + * * H5Epush2 pushes a new error record onto the error stack specified by estack_id. * * @param stack_id @@ -4182,7 +4532,7 @@ public class H5 implements java.io.Serializable { * IN: Error description string. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * file, func, or msg is null. **/ @@ -4191,6 +4541,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5E + * * H5Eregister_class registers a client library or application program to the HDF5 error API so that the * client library or application program can report errors together with HDF5 library. 
* @@ -4204,7 +4556,7 @@ public class H5 implements java.io.Serializable { * @return a class identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -4212,17 +4564,21 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5E + * * H5Eunregister_class removes the error class specified by class_id. * * @param class_id * IN: Error class identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Eunregister_class(long class_id) throws HDF5LibraryException; /** + * @ingroup JH5E + * * H5Ewalk walks the error stack specified by estack_id for the current thread and calls the * function specified in func for each error along the way. * @@ -4236,7 +4592,7 @@ public class H5 implements java.io.Serializable { * IN: Data to be passed with func. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * func is null. **/ @@ -4246,6 +4602,8 @@ public class H5 implements java.io.Serializable { H5Ewalk2(stack_id, direction, func, client_data); } /** + * @ingroup JH5E + * * H5Ewalk2 walks the error stack specified by estack_id for the current thread and calls the * function specified in func for each error along the way. * @@ -4259,7 +4617,7 @@ public class H5 implements java.io.Serializable { * IN: Data to be passed with func. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * func is null. 
**/ @@ -4295,11 +4653,51 @@ public class H5 implements java.io.Serializable { // //////////////////////////////////////////////////////////// // // + // H5ES: Event Set Interface Functions // + // // + // //////////////////////////////////////////////////////////// + /** + * + * @defgroup JH5ES Java Event Set (H5ES) Interface + * + * @see H5ES, C-API + * + * @see @ref H5ES_UG, User Guide + */ + + // /////// unimplemented //////// + // H5_DLL hid_t H5EScreate(void); + // H5_DLL herr_t H5ESwait(hid_t es_id, uint64_t timeout, size_t *num_in_progress, hbool_t *err_occurred); + // H5_DLL herr_t H5EScancel(hid_t es_id, size_t *num_not_canceled, hbool_t *err_occurred); + // H5_DLL herr_t H5ESget_count(hid_t es_id, size_t *count); + // H5_DLL herr_t H5ESget_op_counter(hid_t es_id, uint64_t *counter); + // H5_DLL herr_t H5ESget_err_status(hid_t es_id, hbool_t *err_occurred); + // H5_DLL herr_t H5ESget_err_count(hid_t es_id, size_t *num_errs); + // H5_DLL herr_t H5ESget_err_info(hid_t es_id, size_t num_err_info, H5ES_err_info_t err_info[], + // size_t *err_cleared); + // H5_DLL herr_t H5ESfree_err_info(size_t num_err_info, H5ES_err_info_t err_info[]); + // H5_DLL herr_t H5ESregister_insert_func(hid_t es_id, H5ES_event_insert_func_t func, void *ctx); + // H5_DLL herr_t H5ESregister_complete_func(hid_t es_id, H5ES_event_complete_func_t func, void *ctx); + // H5_DLL herr_t H5ESclose(hid_t es_id); + // + + // //////////////////////////////////////////////////////////// + // // // H5F: File Interface Functions // // // // //////////////////////////////////////////////////////////// + /** + * + * @defgroup JH5F Java File (H5F) Interface + * + * @see H5F, C-API + * + * @see @ref H5F_UG, User Guide + */ /** + * @ingroup JH5F + * * H5Fclose terminates access to an HDF5 file. * * @param file_id @@ -4308,7 +4706,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. **/ public static int H5Fclose(long file_id) throws HDF5LibraryException { @@ -4324,6 +4722,8 @@ public class H5 implements java.io.Serializable { private synchronized static native int _H5Fclose(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fopen opens an existing file and is the primary function for accessing existing HDF5 files. * * @param name @@ -4336,7 +4736,7 @@ public class H5 implements java.io.Serializable { * @return a file identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -4356,6 +4756,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * H5Freopen reopens an HDF5 file. * * @param file_id @@ -4364,7 +4766,7 @@ public class H5 implements java.io.Serializable { * @return a new file identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Freopen(long file_id) throws HDF5LibraryException { @@ -4380,25 +4782,27 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Freopen(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fcreate is the primary function for creating HDF5 files. * * @param name * Name of the file to access. * @param flags * File access flags. Possible values include: - *

      - *
    • - * H5F_ACC_RDWR Allow read and write access to file.
    • - *
    • - * H5F_ACC_RDONLY Allow read-only access to file.
    • - *
    • - * H5F_ACC_TRUNC Truncate file, if it already exists, erasing all data previously stored in the - * file.
    • - *
    • - * H5F_ACC_EXCL Fail if file already exists.
    • - *
    • - * H5P_DEFAULT Apply default file access and creation properties.
    • - *
    + *
      + *
    • + * @ref H5F_ACC_RDWR Allow read and write access to file.
    • + *
    • + * @ref H5F_ACC_RDONLY Allow read-only access to file.
    • + *
    • + * @ref H5F_ACC_TRUNC Truncate file, if it already exists, erasing all data previously stored + * in the file.
    • + *
    • + * @ref H5F_ACC_EXCL Fail if file already exists.
    • + *
    • + * @ref H5P_DEFAULT Apply default file access and creation properties.
    • + *
    * * @param create_id * File creation property list identifier, used when modifying default file meta-data. Use @@ -4411,7 +4815,7 @@ public class H5 implements java.io.Serializable { * @return a file identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -4431,8 +4835,10 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * H5Fflush causes all buffers associated with a file or object to be immediately flushed (written) to - * disk without removing the data from the (memory) cache.

    After this call completes, the file (or + * disk without removing the data from the (memory) cache.

    After this call completes, the file (or * object) is in a consistent state and all data written to date is assured to be permanent. * * @param object_id @@ -4440,9 +4846,9 @@ public class H5 implements java.io.Serializable { * associated with the file, including the file itself, a dataset, a group, an attribute, * or a named data type. * @param scope - * specifies the scope of the flushing action, in the case that the HDF-5 file is not a single + * specifies the scope of the flushing action, in the case that the HDF5 file is not a single * physical file. - *

    Valid values are: + *

    Valid values are: *

      *
    • H5F_SCOPE_GLOBAL Flushes the entire virtual file.
    • *
    • H5F_SCOPE_LOCAL Flushes only the specified file.
    • @@ -4451,11 +4857,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Fflush(long object_id, int scope) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_access_plist returns the file access property list identifier of the specified file. * * @param file_id @@ -4464,7 +4872,7 @@ public class H5 implements java.io.Serializable { * @return a file access property list identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Fget_access_plist(long file_id) throws HDF5LibraryException { @@ -4480,6 +4888,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Fget_access_plist(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_create_plist returns a file creation property list identifier identifying the creation * properties used to create this file. * @@ -4489,7 +4899,7 @@ public class H5 implements java.io.Serializable { * @return a file creation property list identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Fget_create_plist(long file_id) throws HDF5LibraryException { @@ -4505,6 +4915,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Fget_create_plist(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_filesize retrieves the file size of the HDF5 file. This function * is called after an existing file is opened in order * to learn the true size of the underlying file. 
@@ -4515,11 +4927,13 @@ public class H5 implements java.io.Serializable { * @return the file size of the HDF5 file * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Fget_filesize(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_freespace returns the amount of space that is unused by any objects in the file. * * @param file_id @@ -4528,11 +4942,13 @@ public class H5 implements java.io.Serializable { * @return the amount of free space in the file * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Fget_freespace(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_intent retrieves the intended access mode flag passed with H5Fopen when the file was opened. * * @param file_id @@ -4541,11 +4957,13 @@ public class H5 implements java.io.Serializable { * @return the intended access mode flag, as originally passed with H5Fopen. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Fget_intent(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_fileno retrieves the "file number" for an open file. * * @param file_id @@ -4554,11 +4972,13 @@ public class H5 implements java.io.Serializable { * @return the unique file number for the file. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Fget_fileno(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_mdc_hit_rate queries the metadata cache of the target file to obtain its hit rate (cache hits / * (cache hits + cache misses)) since the last time hit rate statistics were reset. 
* @@ -4568,11 +4988,13 @@ public class H5 implements java.io.Serializable { * @return the double in which the hit rate is returned. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native double H5Fget_mdc_hit_rate(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_mdc_size queries the metadata cache of the target file for the desired size information. * * @param file_id @@ -4588,7 +5010,7 @@ public class H5 implements java.io.Serializable { * @return current number of entries in the cache * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * metadata_cache is null. **/ @@ -4596,6 +5018,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5F + * * H5Fget_name retrieves the name of the file to which the object obj_id belongs. * * @param obj_id @@ -4604,11 +5028,13 @@ public class H5 implements java.io.Serializable { * @return the filename. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native String H5Fget_name(long obj_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_obj_count returns the number of open object identifiers for the file. * * @param file_id @@ -4628,12 +5054,14 @@ public class H5 implements java.io.Serializable { * @return the number of open objects. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Fget_obj_count(long file_id, int types) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_obj_ids returns the list of identifiers for all open HDF5 objects fitting the specified * criteria. 
* @@ -4649,7 +5077,7 @@ public class H5 implements java.io.Serializable { * @return the number of objects placed into obj_id_list. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * obj_id_list is null. **/ @@ -4658,15 +5086,17 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * H5Fis_hdf5 determines whether a file is in the HDF5 format. * * @param name * File name to check format. * - * @return true if is HDF-5, false if not. + * @return true if is HDF5, false if not. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. * @@ -4677,6 +5107,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * H5Fis_accessible determines if the file can be opened with the given fapl. * * @param name @@ -4687,7 +5119,7 @@ public class H5 implements java.io.Serializable { * @return true if file is accessible, false if not. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -4695,6 +5127,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * H5Fmount mounts the file specified by child_id onto the group specified by loc_id and name using the * mount properties plist_id. * @@ -4710,7 +5144,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
**/ @@ -4718,6 +5152,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * Given a mount point, H5Funmount disassociates the mount point's file from the file mounted there. * * @param loc_id @@ -4728,7 +5164,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -4736,6 +5172,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * H5Freset_mdc_hit_rate_stats resets the hit rate statistics counters in the metadata cache associated * with the specified file. * @@ -4743,12 +5181,14 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the target file. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Freset_mdc_hit_rate_stats(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_info returns global information for the file associated with the * object identifier obj_id. * @@ -4757,11 +5197,13 @@ public class H5 implements java.io.Serializable { * @return A buffer(H5F_info2_t) for current "global" information about file * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native H5F_info2_t H5Fget_info(long obj_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fclear_elink_file_cache evicts all the cached child files in the specified file's external file * cache, causing them to be closed if there is nothing else holding them open. * @@ -4769,12 +5211,14 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the target file. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Fclear_elink_file_cache(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fstart_swmr_write will activate SWMR writing mode for a file associated with file_id. This routine * will prepare and ensure the file is safe for SWMR writing. * @@ -4782,22 +5226,26 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the target file. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Fstart_swmr_write(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fstart_mdc_logging starts logging metadata cache events if logging was previously enabled. * * @param file_id * IN: Identifier of the target file. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Fstart_mdc_logging(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fstop_mdc_logging stops logging metadata cache events if logging was previously enabled and is * currently ongoing. * @@ -4805,11 +5253,13 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the target file. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Fstop_mdc_logging(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fget_mdc_logging_status gets the current metadata cache logging status. * * @param file_id @@ -4821,7 +5271,7 @@ public class H5 implements java.io.Serializable { * mdc_logging_status[1] = is_currently_logging, whether events are currently being logged * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* @exception NullPointerException * mdc_logging_status is null. **/ @@ -4830,6 +5280,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5F + * * H5Fget_dset_no_attrs_hint gets the file-level setting to create minimized dataset object headers. * * @param file_id @@ -4838,12 +5290,14 @@ public class H5 implements java.io.Serializable { * @return true if the file-level is set to create minimized dataset object headers, false if not. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Fget_dset_no_attrs_hint(long file_id) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fset_dset_no_attrs_hint sets the file-level setting to create minimized dataset object headers. * * @param file_id @@ -4852,12 +5306,14 @@ public class H5 implements java.io.Serializable { * the minimize hint setting * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Fset_dset_no_attrs_hint(long file_id, boolean minimize) throws HDF5LibraryException; /** + * @ingroup JH5F + * * H5Fset_libver_bounds sets a different low and high bounds while a file is open. * * @param file_id @@ -4868,7 +5324,7 @@ public class H5 implements java.io.Serializable { * IN: The latest version of the library that will be used for writing objects. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Fset_libver_bounds(long file_id, int low, int high) throws HDF5LibraryException; @@ -4901,7 +5357,7 @@ public class H5 implements java.io.Serializable { // * @return a pointer to the file handle being used by the low-level // virtual file driver. // * - // * @exception HDF5LibraryException - Error from the HDF-5 Library. 
+ // * @exception HDF5LibraryException - Error from the HDF5 Library. // **/ // public synchronized static native Pointer file_handle // H5Fget_vfd_handle(int file_id, int fapl) @@ -4918,7 +5374,7 @@ public class H5 implements java.io.Serializable { // * // * @return none // * - // * @exception HDF5LibraryException - Error from the HDF-5 Library. + // * @exception HDF5LibraryException - Error from the HDF5 Library. // * @exception NullPointerException - config_ptr is null. // **/ // public synchronized static native void H5Fget_mdc_config(int file_id, H5AC_cache_config_t config_ptr) @@ -4934,7 +5390,7 @@ public class H5 implements java.io.Serializable { // * // * @return none // * - // * @exception HDF5LibraryException - Error from the HDF-5 Library. + // * @exception HDF5LibraryException - Error from the HDF5 Library. // * @exception NullPointerException - config_ptr is null. // **/ // public synchronized static native int H5Fset_mdc_config(int file_id, H5AC_cache_config_t config_ptr) @@ -4979,8 +5435,17 @@ public class H5 implements java.io.Serializable { // H5G: Group Interface Functions // // // // //////////////////////////////////////////////////////////// + /** + * @defgroup JH5G Java Group (H5G) Interface + * + * @see H5G, C-API + * + * @see @ref H5G_UG, User Guide + **/ /** + * @ingroup JH5G + * * H5Gclose releases resources used by a group which was opened by a call to H5Gcreate() or H5Gopen(). * * @param group_id @@ -4989,7 +5454,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public static int H5Gclose(long group_id) throws HDF5LibraryException { @@ -5005,6 +5470,8 @@ public class H5 implements java.io.Serializable { private synchronized static native int _H5Gclose(long group_id) throws HDF5LibraryException; /** + * @ingroup JH5G + * * H5Gcreate creates a new group with the specified name at the specified location, loc_id. * * @param loc_id @@ -5022,7 +5489,7 @@ public class H5 implements java.io.Serializable { * @return a valid group identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5043,6 +5510,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5G + * * H5Gcreate_anon creates a new empty group in the file specified by loc_id. * * @param loc_id @@ -5056,7 +5525,7 @@ public class H5 implements java.io.Serializable { * @return a valid group identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Gcreate_anon(long loc_id, long gcpl_id, long gapl_id) throws HDF5LibraryException { @@ -5073,6 +5542,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException; /** + * @ingroup JH5G + * * H5Gget_create_plist returns an identifier for the group creation property list associated with the * group specified by group_id. * @@ -5082,11 +5553,13 @@ public class H5 implements java.io.Serializable { * @return an identifier for the group's creation property list * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Gget_create_plist(long group_id) throws HDF5LibraryException; /** + * @ingroup JH5G + * * H5Gget_info retrieves information about the group specified by group_id. The information is returned in * the group_info struct. 
* @@ -5096,11 +5569,13 @@ public class H5 implements java.io.Serializable { * @return a structure in which group information is returned * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native H5G_info_t H5Gget_info(long group_id) throws HDF5LibraryException; /** + * @ingroup JH5G + * * H5Gget_info_by_idx retrieves information about a group, according to the group's position within an * index. * @@ -5120,7 +5595,7 @@ public class H5 implements java.io.Serializable { * @return a structure in which group information is returned * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5130,6 +5605,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5G + * * H5Gget_info_by_name retrieves information about the group group_name located in the file or group * specified by loc_id. * @@ -5143,7 +5620,7 @@ public class H5 implements java.io.Serializable { * @return a structure in which group information is returned * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5151,6 +5628,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5G + * * retrieves information of all objects under the group (name) located in the file or group specified by * loc_id. * @@ -5168,7 +5647,7 @@ public class H5 implements java.io.Serializable { * @return the number of items found * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
*/ @@ -5185,6 +5664,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5G + * * retrieves information of all objects under the group (name) located in the file or group specified by * loc_id. * @@ -5206,7 +5687,7 @@ public class H5 implements java.io.Serializable { * @return the number of items found * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. */ @@ -5219,6 +5700,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5G + * * retrieves information of all objects under the group (name) located in the file or group specified by * loc_id. * @@ -5242,7 +5725,7 @@ public class H5 implements java.io.Serializable { * @return the number of items found * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. */ @@ -5255,6 +5738,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5G + * * retrieves information of all objects under the group (name) located in the file or group specified by * loc_id. * @@ -5280,7 +5765,7 @@ public class H5 implements java.io.Serializable { * @return the number of items found * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. */ @@ -5331,6 +5816,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5G + * * H5Gget_obj_info_idx report the name and type of object with index 'idx' in a Group. The 'idx' * corresponds to the index maintained by H5Giterate. Each link is returned, so objects with multiple * links will be counted once for each link. @@ -5349,7 +5836,7 @@ public class H5 implements java.io.Serializable { * @return non-negative if successful, -1 if not. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. */ @@ -5373,6 +5860,8 @@ public class H5 implements java.io.Serializable { * a lot of time to finish if the number of objects is more than 10,000 */ /** + * @ingroup JH5G + * * retrieves information of all objects (recurvisely) under the group (name) located in the file or group * specified by loc_id up to maximum specified by objMax. * @@ -5392,7 +5881,7 @@ public class H5 implements java.io.Serializable { * @return the number of items found * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. */ @@ -5433,6 +5922,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5G + * * H5Gn_members report the number of objects in a Group. The 'objects' include everything that will be * visited by H5Giterate. Each link is returned, so objects with multiple links will be counted once for * each link. @@ -5445,7 +5936,7 @@ public class H5 implements java.io.Serializable { * @return the number of members in the group or -1 if error. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. */ @@ -5467,6 +5958,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5G + * * H5Gopen opens an existing group, name, at the location specified by loc_id. * * @param loc_id @@ -5479,7 +5972,7 @@ public class H5 implements java.io.Serializable { * @return a valid group identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
**/ @@ -5499,6 +5992,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5G + * * H5Gflush causes all buffers associated with a group to be immediately flushed to disk without * removing the data from the cache. * @@ -5506,11 +6001,13 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the group to be flushed. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Gflush(long group_id) throws HDF5LibraryException; /** + * @ingroup JH5G + * * H5Grefresh causes all buffers associated with a group to be cleared and immediately re-loaded * with updated contents from disk. This function essentially closes the group, evicts all metadata * associated with it from the cache, and then re-opens the group. The reopened group is automatically @@ -5520,7 +6017,7 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the group to be refreshed. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Grefresh(long group_id) throws HDF5LibraryException; @@ -5553,8 +6050,17 @@ public class H5 implements java.io.Serializable { // H5I: HDF5 Identifier Interface API Functions // // // // //////////////////////////////////////////////////////////// + /** + * @defgroup JH5I Java Identifier (H5I) Interface + * + * @see H5I, C-API + * + * @see @ref H5I_UG, User Guide + **/ /** + * @ingroup JH5I + * * H5Iget_file_id obtains the file ID specified by the identifier, obj_id. * * @param obj_id @@ -5563,11 +6069,13 @@ public class H5 implements java.io.Serializable { * @return the file ID. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native long H5Iget_file_id(long obj_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Iget_name_long retrieves the name of an object specified by the identifier, obj_id. * @deprecated * @@ -5581,12 +6089,14 @@ public class H5 implements java.io.Serializable { * @return the length of the name retrieved. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ @Deprecated public synchronized static native long H5Iget_name_long(long obj_id, String[] name, long size) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5I + * * H5Iget_name retrieves the name of an object specified by the identifier, obj_id. * * @param obj_id @@ -5595,11 +6105,13 @@ public class H5 implements java.io.Serializable { * @return String for Attribute name. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native String H5Iget_name(long obj_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Iget_ref obtains the number of references outstanding specified by the identifier, obj_id. * * @param obj_id @@ -5608,12 +6120,14 @@ public class H5 implements java.io.Serializable { * @return the reference count. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Iget_ref(long obj_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5I + * * H5Idec_ref decrements the reference count specified by the identifier, obj_id. * If the reference count for an ID reaches zero, the object will be closed. * @@ -5623,12 +6137,14 @@ public class H5 implements java.io.Serializable { * @return the reference count. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Idec_ref(long obj_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5I + * * H5Iinc_ref increments the reference count specified by the identifier, obj_id. * * @param obj_id @@ -5637,12 +6153,14 @@ public class H5 implements java.io.Serializable { * @return the reference count. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Iinc_ref(long obj_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5I + * * H5Iget_type retrieves the type of the object identified by obj_id. * * @param obj_id @@ -5651,11 +6169,13 @@ public class H5 implements java.io.Serializable { * @return the object type if successful; otherwise H5I_BADID. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Iget_type(long obj_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Iget_type_ref retrieves the reference count on an ID type. The reference count is used by the library * to indicate when an ID type can be destroyed. * @@ -5665,11 +6185,13 @@ public class H5 implements java.io.Serializable { * @return The current reference count on success, negative on failure. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Iget_type_ref(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Idec_type_ref decrements the reference count on an identifier type. The reference count is used by * the library to indicate when an identifier type can be destroyed. If the reference count reaches zero, * this function will destroy it. @@ -5680,11 +6202,13 @@ public class H5 implements java.io.Serializable { * @return The current reference count on success, negative on failure. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Idec_type_ref(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Iinc_type_ref increments the reference count on an ID type. The reference count is used by the * library to indicate when an ID type can be destroyed. * @@ -5694,11 +6218,13 @@ public class H5 implements java.io.Serializable { * @return The current reference count on success, negative on failure. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Iinc_type_ref(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Inmembers returns the number of identifiers of the identifier type specified in type. * * @param type_id @@ -5707,11 +6233,13 @@ public class H5 implements java.io.Serializable { * @return Number of identifiers of the specified identifier type * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Inmembers(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Iis_valid indicates if the identifier type specified in obj_id is valid. * * @param obj_id @@ -5720,11 +6248,13 @@ public class H5 implements java.io.Serializable { * @return a boolean, true if the specified identifier id is valid * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Iis_valid(long obj_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Itype_exists indicates if the identifier type specified in type exists. 
* * @param type_id @@ -5733,11 +6263,13 @@ public class H5 implements java.io.Serializable { * @return a boolean, true if the specified identifier type exists * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Itype_exists(int type_id) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Iclear_type deletes all identifiers of the type identified by the argument type. * * @param type_id @@ -5746,12 +6278,14 @@ public class H5 implements java.io.Serializable { * IN: Whether or not to force deletion of all identifiers * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Iclear_type(int type_id, boolean force) throws HDF5LibraryException; /** + * @ingroup JH5I + * * H5Idestroy_type deletes an entire identifier type. All identifiers of this type are destroyed * and no new identifiers of this type can be registered. * @@ -5759,7 +6293,7 @@ public class H5 implements java.io.Serializable { * IN: Identifier of identifier type which is to be destroyed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Idestroy_type(int type_id) throws HDF5LibraryException; @@ -5785,8 +6319,17 @@ public class H5 implements java.io.Serializable { // ////////////////////////////////////////////////////////////////// // H5L: Link Interface Functions // // ////////////////////////////////////////////////////////////////// + /** + * @defgroup JH5L Java Link (H5L) Interface + * + * @see H5L, C-API + * + * @see @ref H5L_UG, User Guide + **/ /** + * @ingroup JH5L + * * H5Lcopy copies a link from one location to another. 
* * @param src_loc @@ -5803,7 +6346,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5812,6 +6355,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lcreate_external creates a new soft link to an external object, which is an object in a different * HDF5 file from the location of the link. * @@ -5829,7 +6374,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5839,6 +6384,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lcreate_hard creates a new hard link to a pre-existing object in an HDF5 file. * * @param cur_loc @@ -5855,7 +6402,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * cur_name or dst_name is null. **/ @@ -5864,6 +6411,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lcreate_soft creates a new soft link to an object in an HDF5 file. * * @param link_target @@ -5878,7 +6427,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * link_name is null. 
**/ @@ -5887,6 +6436,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Ldelete removes the link specified from a group. * * @param loc_id @@ -5897,7 +6448,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5905,6 +6456,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Ldelete_by_idx removes the nth link in a group according to the specified order and in the specified * index. * @@ -5922,7 +6475,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * group_name is null. **/ @@ -5931,6 +6484,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lexists checks if a link with a particular name exists in a group. * * @param loc_id @@ -5943,7 +6498,7 @@ public class H5 implements java.io.Serializable { * @return a boolean, true if the name exists, otherwise false. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5951,6 +6506,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lget_info returns information about the specified link. * * @param loc_id @@ -5963,7 +6520,7 @@ public class H5 implements java.io.Serializable { * @return a buffer(H5L_info_t) for the link information. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -5971,6 +6528,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lget_info_by_idx opens a named datatype at the location specified by loc_id and return an identifier * for the datatype. * @@ -5990,7 +6549,7 @@ public class H5 implements java.io.Serializable { * @return a buffer(H5L_info_t) for the link information. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * group_name is null. **/ @@ -6000,6 +6559,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lget_name_by_idx retrieves name of the nth link in a group, according to the order within a specified * field or index. * @@ -6019,7 +6580,7 @@ public class H5 implements java.io.Serializable { * @return a String for the link name. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * group_name is null. **/ @@ -6028,6 +6589,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lget_value returns the link value of a symbolic link. Note that this function is a combination * of H5Lget_info(), H5Lget_val() and for external links, H5Lunpack_elink_val. * @@ -6043,7 +6606,7 @@ public class H5 implements java.io.Serializable { * @return the link type * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
**/ @@ -6052,6 +6615,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lget_value_by_idx retrieves value of the nth link in a group, according to the order within an index. * Note that this function is a combination of H5Lget_info(), H5Lget_val() and for external links, * H5Lunpack_elink_val. @@ -6074,7 +6639,7 @@ public class H5 implements java.io.Serializable { * @return the link type * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * group_name is null. **/ @@ -6084,6 +6649,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Literate iterates through links in a group. * * @param grp_id @@ -6103,13 +6670,15 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Literate(long grp_id, int idx_type, int order, long idx, H5L_iterate_t op, H5L_iterate_opdata_t op_data) throws HDF5LibraryException; /** + * @ingroup JH5L + * * H5Literate_by_name iterates through links in a group. * * @param grp_id @@ -6133,7 +6702,7 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * group_name is null. **/ @@ -6143,6 +6712,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lmove renames a link within an HDF5 file. 
* * @param src_loc @@ -6159,7 +6730,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier to be associated with the new link. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6168,6 +6739,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lvisit recursively visits all links starting from a specified group. * * @param grp_id @@ -6185,12 +6758,14 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Lvisit(long grp_id, int idx_type, int order, H5L_iterate_t op, H5L_iterate_opdata_t op_data) throws HDF5LibraryException; /** + * @ingroup JH5L + * * H5Lvisit_by_name recursively visits all links starting from a specified group. * * @param loc_id @@ -6212,7 +6787,7 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * group_name is null. **/ @@ -6222,6 +6797,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5L + * * H5Lis_registered tests whether a user-defined link class is currently registered, * either by the HDF5 Library or by the user through the use of H5Lregister. * @@ -6233,11 +6810,13 @@ public class H5 implements java.io.Serializable { * user-defined class identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Lis_registered(int link_cls_id) throws HDF5LibraryException; /** + * @ingroup JH5L + * * H5Lunregister unregisters a class of user-defined links, preventing them from being traversed, queried, * moved, etc. * @@ -6245,7 +6824,7 @@ public class H5 implements java.io.Serializable { * IN: User-defined link class identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Lunregister(int link_cls_id) throws HDF5LibraryException; @@ -6279,8 +6858,17 @@ public class H5 implements java.io.Serializable { // H5O: HDF5 1.8 Object Interface API Functions // // // // //////////////////////////////////////////////////////////// + /** + * @defgroup JH5O Java Object (H5O) Interface + * + * @see H5O, C-API + * + * @see @ref H5O_UG, User Guide + **/ /** + * @ingroup JH5O + * * H5Oclose closes the group, dataset, or named datatype specified. * * @param object_id @@ -6289,7 +6877,7 @@ public class H5 implements java.io.Serializable { * @return non-negative on success * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static int H5Oclose(long object_id) throws HDF5LibraryException { @@ -6305,6 +6893,8 @@ public class H5 implements java.io.Serializable { private synchronized static native int _H5Oclose(long object_id) throws HDF5LibraryException; /** + * @ingroup JH5O + * * H5Ocopy copies the group, dataset or named datatype specified from the file or group specified by * source location to the destination location. * @@ -6322,7 +6912,7 @@ public class H5 implements java.io.Serializable { * IN: Link creation property list for the new hard link * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
**/ @@ -6331,6 +6921,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Oget_comment retrieves the comment for the specified object. * * @param obj_id @@ -6339,12 +6931,14 @@ public class H5 implements java.io.Serializable { * @return the comment * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native String H5Oget_comment(long obj_id) throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5O + * * H5Oset_comment sets the comment for the specified object. * * @param obj_id @@ -6353,7 +6947,7 @@ public class H5 implements java.io.Serializable { * IN: The new comment. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * * @deprecated As of HDF5 1.8 in favor of object attributes. **/ @@ -6362,6 +6956,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException; /** + * @ingroup JH5O + * * H5Oget_comment_by_name retrieves the comment for an object. * * @param loc_id @@ -6374,7 +6970,7 @@ public class H5 implements java.io.Serializable { * @return the comment * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6382,6 +6978,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException, NullPointerException; /** + * @ingroup JH5O + * * H5Oset_comment_by_name sets the comment for the specified object. * * @param loc_id @@ -6394,7 +6992,7 @@ public class H5 implements java.io.Serializable { * IN: Link access property list identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
* @@ -6406,6 +7004,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Oget_info retrieves the metadata for an object specified by an identifier. * * @param loc_id @@ -6414,7 +7014,7 @@ public class H5 implements java.io.Serializable { * @return object information * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6424,6 +7024,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5O + * * H5Oget_info retrieves the metadata for an object specified by an identifier. * * @param loc_id @@ -6434,7 +7036,7 @@ public class H5 implements java.io.Serializable { * @return object information * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6442,56 +7044,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** - * H5Oget_info_by_name retrieves the metadata for an object, identifying the object by location and - * relative name. - * - * @param loc_id - * IN: File or group identifier specifying location of group in which object is located - * @param name - * IN: Relative name of group - * @param lapl_id - * IN: Access property list identifier for the link pointing to the object (Not currently used; - * pass as H5P_DEFAULT.) - * - * @return object information - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * name is null. 
- **/ - public static H5O_info_t H5Oget_info_by_name(long loc_id, String name, long lapl_id) - throws HDF5LibraryException, NullPointerException - { - return H5Oget_info_by_name(loc_id, name, HDF5Constants.H5O_INFO_ALL, lapl_id); - } - - /** - * H5Oget_info_by_name retrieves the metadata for an object, identifying the object by location and - * relative name. - * - * @param loc_id - * IN: File or group identifier specifying location of group in which object is located - * @param name - * IN: Relative name of group - * @param fields - * IN: Object fields to select - * @param lapl_id - * IN: Access property list identifier for the link pointing to the object (Not currently used; - * pass as H5P_DEFAULT.) + * @ingroup JH5O * - * @return object information - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * name is null. - **/ - public synchronized static native H5O_info_t H5Oget_info_by_name(long loc_id, String name, int fields, - long lapl_id) - throws HDF5LibraryException, NullPointerException; - - /** * H5Oget_info_by_idx retrieves the metadata for an object, identifying the object by an index position. * * @param loc_id @@ -6511,7 +7065,7 @@ public class H5 implements java.io.Serializable { * @return object information * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6524,6 +7078,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5O + * * H5Oget_info_by_idx retrieves the metadata for an object, identifying the object by an index position. * * @param loc_id @@ -6545,7 +7101,7 @@ public class H5 implements java.io.Serializable { * @return object information * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
**/ @@ -6555,50 +7111,10 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** - * H5Oget_native_info retrieves the native HDF5-specific metadata for an HDF5 object specified by an - * identifier. Native HDF5-specific metadata includes things like object header information and object - * storage layout information. - * - * @param loc_id - * IN: Identifier for target object - * - * @return object information - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * name is null. - **/ - public static H5O_native_info_t H5Oget_native_info(long loc_id) - throws HDF5LibraryException, NullPointerException - { - return H5Oget_native_info(loc_id, HDF5Constants.H5O_NATIVE_INFO_ALL); - } - - /** - * H5Oget_native_info retrieves the native HDF5-specific metadata for an HDF5 object specified by an - * identifier. Native HDF5-specific metadata includes things like object header information and object - * storage layout information. - * - * @param loc_id - * IN: Identifier for target object - * @param fields - * IN: Object fields to select - * - * @return object information + * @ingroup JH5O * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * name is null. - **/ - public synchronized static native H5O_native_info_t H5Oget_native_info(long loc_id, int fields) - throws HDF5LibraryException, NullPointerException; - - /** - * H5Oget_native_info_by_name retrieves the native HDF5-specific metadata for an HDF5 object, identifying - * the object by location and relative name. Native HDF5-specific metadata includes things like object - * header information and object storage layout information. + * H5Oget_info_by_name retrieves the metadata for an object, identifying the object by location and + * relative name. 
* * @param loc_id * IN: File or group identifier specifying location of group in which object is located @@ -6611,20 +7127,21 @@ public class H5 implements java.io.Serializable { * @return object information * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ - public static H5O_native_info_t H5Oget_native_info_by_name(long loc_id, String name, long lapl_id) + public static H5O_info_t H5Oget_info_by_name(long loc_id, String name, long lapl_id) throws HDF5LibraryException, NullPointerException { - return H5Oget_native_info_by_name(loc_id, name, HDF5Constants.H5O_NATIVE_INFO_ALL, lapl_id); + return H5Oget_info_by_name(loc_id, name, HDF5Constants.H5O_INFO_ALL, lapl_id); } /** - * H5Oget_native_info_by_name retrieves the native HDF5-specific metadata for an HDF5 object, identifying - * the object by location and relative name. Native HDF5-specific metadata includes things like object - * header information and object storage layout information. + * @ingroup JH5O + * + * H5Oget_info_by_name retrieves the metadata for an object, identifying the object by location and + * relative name. * * @param loc_id * IN: File or group identifier specifying location of group in which object is located @@ -6639,81 +7156,17 @@ public class H5 implements java.io.Serializable { * @return object information * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
**/ - public synchronized static native H5O_native_info_t H5Oget_native_info_by_name(long loc_id, String name, - int fields, long lapl_id) + public synchronized static native H5O_info_t H5Oget_info_by_name(long loc_id, String name, int fields, + long lapl_id) throws HDF5LibraryException, NullPointerException; /** - * H5Oget_native_info_by_idx retrieves the native HDF5-specific metadata for an HDF5 object, identifying - * the object by an index position. Native HDF5-specific metadata includes things like object header - * information and object storage layout information. - * - * @param loc_id - * IN: File or group identifier - * @param group_name - * IN: Name of group, relative to loc_id, in which object is located - * @param idx_type - * IN: Type of index by which objects are ordered - * @param order - * IN: Order of iteration within index - * @param n - * IN: Object to open - * @param lapl_id - * IN: Access property list identifier for the link pointing to the object (Not currently used; - * pass as H5P_DEFAULT.) - * - * @return object information - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * name is null. - **/ - public static H5O_native_info_t H5Oget_native_info_by_idx(long loc_id, String group_name, int idx_type, - int order, long n, long lapl_id) - throws HDF5LibraryException, NullPointerException - { - return H5Oget_native_info_by_idx(loc_id, group_name, idx_type, order, n, - HDF5Constants.H5O_NATIVE_INFO_ALL, lapl_id); - } - - /** - * H5Oget_native_info_by_idx retrieves the native HDF5-specific metadata for an HDF5 object, identifying - * the object by an index position. Native HDF5-specific metadata includes things like object header - * information and object storage layout information. 
- * - * @param loc_id - * IN: File or group identifier - * @param group_name - * IN: Name of group, relative to loc_id, in which object is located - * @param idx_type - * IN: Type of index by which objects are ordered - * @param order - * IN: Order of iteration within index - * @param n - * IN: Object to open - * @param fields - * IN: Object fields to select - * @param lapl_id - * IN: Access property list identifier for the link pointing to the object (Not currently used; - * pass as H5P_DEFAULT.) - * - * @return object information + * @ingroup JH5O * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * name is null. - **/ - public synchronized static native H5O_native_info_t H5Oget_native_info_by_idx( - long loc_id, String group_name, int idx_type, int order, long n, int fields, long lapl_id) - throws HDF5LibraryException, NullPointerException; - - /** * H5Olink creates a new hard link to an object in an HDF5 file. * * @param obj_id @@ -6728,7 +7181,7 @@ public class H5 implements java.io.Serializable { * IN: Access property list identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6737,6 +7190,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Oopen opens a group, dataset, or named datatype specified by a location and a path name. * * @param loc_id @@ -6749,7 +7204,7 @@ public class H5 implements java.io.Serializable { * @return an object identifier for the opened object * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. 
**/ @@ -6769,6 +7224,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Ovisit recursively visits all objects accessible from a specified object. * * @param obj_id @@ -6787,7 +7244,7 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6798,6 +7255,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5O + * * H5Ovisit recursively visits all objects accessible from a specified object. * * @param obj_id @@ -6818,7 +7277,7 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6827,6 +7286,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Ovisit_by_name recursively visits all objects starting from a specified object. * * @param loc_id @@ -6849,7 +7310,7 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6862,6 +7323,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5O + * * H5Ovisit_by_name recursively visits all objects starting from a specified object. * * @param loc_id @@ -6886,7 +7349,7 @@ public class H5 implements java.io.Serializable { * members were processed with no operator returning non-zero. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6896,6 +7359,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Oexists_by_name is used by an application to check that an existing link resolves to an object. * Primarily, it is designed to check for dangling soft, external, or user-defined links. * @@ -6909,7 +7374,7 @@ public class H5 implements java.io.Serializable { * @return Returns TRUE or FALSE if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -6917,28 +7382,34 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Odecr_refcount decrements the hard link reference count for an object. * * @param object_id * IN: Object identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Odecr_refcount(long object_id) throws HDF5LibraryException; /** + * @ingroup JH5O + * * H5Oincr_refcount increments the hard link reference count for an object. * * @param object_id * IN: Object identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Oincr_refcount(long object_id) throws HDF5LibraryException; /** + * @ingroup JH5O + * * H5Oopen_by_token opens a group, dataset, or named datatype using its object token within an HDF5 file. * * @param loc_id @@ -6949,7 +7420,7 @@ public class H5 implements java.io.Serializable { * @return an object identifier for the opened object * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public static long H5Oopen_by_token(long loc_id, H5O_token_t token) throws HDF5LibraryException { @@ -6968,6 +7439,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Oopen_by_idx opens the nth object in the group specified. * * @param loc_id @@ -6986,7 +7459,7 @@ public class H5 implements java.io.Serializable { * @return an object identifier for the opened object * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * group_name is null. **/ @@ -7007,6 +7480,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5O + * * H5Oflush causes all buffers associated with an object to be immediately flushed to disk without * removing the data from the cache. object_id can be any named object associated with an HDF5 file * including a dataset, a group, or a committed datatype. @@ -7015,11 +7490,13 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the object to be flushed. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Oflush(long object_id) throws HDF5LibraryException; /** + * @ingroup JH5O + * * H5Orefresh causes all buffers associated with an object to be cleared and immediately re-loaded with * updated contents from disk. This function essentially closes the object, evicts all metadata associated * with it from the cache, and then re-opens the object. The reopened object is automatically @@ -7030,11 +7507,13 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the object to be refreshed. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native void H5Orefresh(long object_id) throws HDF5LibraryException; /** + * @ingroup JH5O + * * H5Odisable_mdc_flushes corks an object, keeping dirty entries associated with the object in the * metadata cache. * @@ -7042,7 +7521,10 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the object to be corked. **/ public synchronized static native void H5Odisable_mdc_flushes(long object_id); + /** + * @ingroup JH5O + * * H5Oenable_mdc_flushes uncorks an object, keeping dirty entries associated with the object in the * metadata cache. * @@ -7050,7 +7532,10 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the object to be uncorked. **/ public synchronized static native void H5Oenable_mdc_flushes(long object_id); + /** + * @ingroup JH5O + * * H5Oare_mdc_flushes_disabled retrieve the object's "cork" status. * * @param object_id @@ -7062,21 +7547,201 @@ public class H5 implements java.io.Serializable { **/ public synchronized static native boolean H5Oare_mdc_flushes_disabled(long object_id); - // /////// unimplemented //////// - // herr_t H5Otoken_cmp(hid_t loc_id, const H5O_token_t *token1, const H5O_token_t *token2, - // int *cmp_value); - // herr_t H5Otoken_to_str(hid_t loc_id, const H5O_token_t *token, char **token_str); - // herr_t H5Otoken_from_str(hid_t loc_id, const char *token_str, H5O_token_t *token); - - // //////////////////////////////////////////////////////////// - // // - // H5P: Property List Interface Functions // - // // + /** + * @ingroup JH5O + * + * H5Oget_native_info retrieves the native HDF5-specific metadata for an HDF5 object specified by an + * identifier. Native HDF5-specific metadata includes things like object header information and object + * storage layout information. + * + * @param loc_id + * IN: Identifier for target object + * + * @return object information + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. 
+ * @exception NullPointerException + * name is null. + **/ + public static H5O_native_info_t H5Oget_native_info(long loc_id) + throws HDF5LibraryException, NullPointerException + { + return H5Oget_native_info(loc_id, HDF5Constants.H5O_NATIVE_INFO_ALL); + } + + /** + * @ingroup JH5O + * + * H5Oget_native_info retrieves the native HDF5-specific metadata for an HDF5 object specified by an + * identifier. Native HDF5-specific metadata includes things like object header information and object + * storage layout information. + * + * @param loc_id + * IN: Identifier for target object + * @param fields + * IN: Object fields to select + * + * @return object information + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * name is null. + **/ + public synchronized static native H5O_native_info_t H5Oget_native_info(long loc_id, int fields) + throws HDF5LibraryException, NullPointerException; + + /** + * @ingroup JH5O + * + * H5Oget_native_info_by_idx retrieves the native HDF5-specific metadata for an HDF5 object, identifying + * the object by an index position. Native HDF5-specific metadata includes things like object header + * information and object storage layout information. + * + * @param loc_id + * IN: File or group identifier + * @param group_name + * IN: Name of group, relative to loc_id, in which object is located + * @param idx_type + * IN: Type of index by which objects are ordered + * @param order + * IN: Order of iteration within index + * @param n + * IN: Object to open + * @param lapl_id + * IN: Access property list identifier for the link pointing to the object (Not currently used; + * pass as H5P_DEFAULT.) + * + * @return object information + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * name is null. 
+ **/ + public static H5O_native_info_t H5Oget_native_info_by_idx(long loc_id, String group_name, int idx_type, + int order, long n, long lapl_id) + throws HDF5LibraryException, NullPointerException + { + return H5Oget_native_info_by_idx(loc_id, group_name, idx_type, order, n, + HDF5Constants.H5O_NATIVE_INFO_ALL, lapl_id); + } + + /** + * @ingroup JH5O + * + * H5Oget_native_info_by_idx retrieves the native HDF5-specific metadata for an HDF5 object, identifying + * the object by an index position. Native HDF5-specific metadata includes things like object header + * information and object storage layout information. + * + * @param loc_id + * IN: File or group identifier + * @param group_name + * IN: Name of group, relative to loc_id, in which object is located + * @param idx_type + * IN: Type of index by which objects are ordered + * @param order + * IN: Order of iteration within index + * @param n + * IN: Object to open + * @param fields + * IN: Object fields to select + * @param lapl_id + * IN: Access property list identifier for the link pointing to the object (Not currently used; + * pass as H5P_DEFAULT.) + * + * @return object information + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * name is null. + **/ + public synchronized static native H5O_native_info_t H5Oget_native_info_by_idx( + long loc_id, String group_name, int idx_type, int order, long n, int fields, long lapl_id) + throws HDF5LibraryException, NullPointerException; + + /** + * @ingroup JH5O + * + * H5Oget_native_info_by_name retrieves the native HDF5-specific metadata for an HDF5 object, identifying + * the object by location and relative name. Native HDF5-specific metadata includes things like object + * header information and object storage layout information. 
+ * + * @param loc_id + * IN: File or group identifier specifying location of group in which object is located + * @param name + * IN: Relative name of group + * @param lapl_id + * IN: Access property list identifier for the link pointing to the object (Not currently used; + * pass as H5P_DEFAULT.) + * + * @return object information + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * name is null. + **/ + public static H5O_native_info_t H5Oget_native_info_by_name(long loc_id, String name, long lapl_id) + throws HDF5LibraryException, NullPointerException + { + return H5Oget_native_info_by_name(loc_id, name, HDF5Constants.H5O_NATIVE_INFO_ALL, lapl_id); + } + + /** + * @ingroup JH5O + * + * H5Oget_native_info_by_name retrieves the native HDF5-specific metadata for an HDF5 object, identifying + * the object by location and relative name. Native HDF5-specific metadata includes things like object + * header information and object storage layout information. + * + * @param loc_id + * IN: File or group identifier specifying location of group in which object is located + * @param name + * IN: Relative name of group + * @param fields + * IN: Object fields to select + * @param lapl_id + * IN: Access property list identifier for the link pointing to the object (Not currently used; + * pass as H5P_DEFAULT.) + * + * @return object information + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * name is null. 
+ **/ + public synchronized static native H5O_native_info_t H5Oget_native_info_by_name(long loc_id, String name, + int fields, long lapl_id) + throws HDF5LibraryException, NullPointerException; + + // /////// unimplemented //////// + // herr_t H5Otoken_cmp(hid_t loc_id, const H5O_token_t *token1, const H5O_token_t *token2, + // int *cmp_value); + // herr_t H5Otoken_to_str(hid_t loc_id, const H5O_token_t *token, char **token_str); + // herr_t H5Otoken_from_str(hid_t loc_id, const char *token_str, H5O_token_t *token); + + // //////////////////////////////////////////////////////////// + // // + // H5P: Property List Interface Functions // + // // // //////////////////////////////////////////////////////////// // /////// Generic property list routines /////// + /** + * @defgroup JH5P Java Property List (H5P) Interface + * + * @see H5P, C-API + * + * @see @ref H5P_UG, User Guide + **/ /** + * @ingroup JH5P + * * H5Pget_class_name retrieves the name of a generic property list class * * @param plid @@ -7084,11 +7749,13 @@ public class H5 implements java.io.Serializable { * @return name of a property list if successful; null if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native String H5Pget_class_name(long plid) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pcreate creates a new property as an instance of some property list class. * * @param type @@ -7097,7 +7764,7 @@ public class H5 implements java.io.Serializable { * @return a property list identifier (plist) if successful; otherwise Fail (-1). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public static long H5Pcreate(long type) throws HDF5LibraryException { @@ -7113,6 +7780,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Pcreate(long type) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget retrieves a copy of the value for a property in a property list (support integer only) * * @param plid @@ -7122,11 +7791,13 @@ public class H5 implements java.io.Serializable { * @return value for a property if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native int H5Pget(long plid, String name) throws HDF5LibraryException; /** + * @ingroup JH5P + * * Sets a property list value (support integer only) * * @param plid @@ -7138,12 +7809,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native int H5Pset(long plid, String name, int value) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pexist determines whether a property exists within a property list or class * * @param plid @@ -7154,11 +7827,13 @@ public class H5 implements java.io.Serializable { * exist; * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native boolean H5Pexist(long plid, String name) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_size retrieves the size of a property's value in bytes * * @param plid @@ -7168,11 +7843,13 @@ public class H5 implements java.io.Serializable { * @return size of a property's value if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
*/ public synchronized static native long H5Pget_size(long plid, String name) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_nprops retrieves the number of properties in a property list or class * * @param plid @@ -7180,11 +7857,13 @@ public class H5 implements java.io.Serializable { * @return number of properties if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native long H5Pget_nprops(long plid) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_class returns the property list class for the property list identified by the plist parameter. * * @param plist @@ -7192,11 +7871,13 @@ public class H5 implements java.io.Serializable { * @return a property list class if successful. Otherwise returns H5P_ROOT (-1). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Pget_class(long plist) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_class_parent retrieves an identifier for the parent class of a property class * * @param plid @@ -7204,11 +7885,13 @@ public class H5 implements java.io.Serializable { * @return a valid parent class object identifier if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native long H5Pget_class_parent(long plid) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pequal determines if two property lists or classes are equal * * @param plid1 @@ -7219,11 +7902,13 @@ public class H5 implements java.io.Serializable { * @return positive value if equal; zero if unequal, a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
*/ public synchronized static native int H5Pequal(long plid1, long plid2) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pequal determines if two property lists or classes are equal * * @param plid1 @@ -7234,7 +7919,7 @@ public class H5 implements java.io.Serializable { * @return TRUE if equal, FALSE if unequal * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public static boolean H5P_equal(long plid1, long plid2) throws HDF5LibraryException { @@ -7244,6 +7929,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5P + * * H5Pisa_class checks to determine whether a property list is a member of the specified class * * @param plist @@ -7253,11 +7940,13 @@ public class H5 implements java.io.Serializable { * @return a positive value if equal; zero if unequal; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native int H5Pisa_class(long plist, long pclass) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pcopy_prop copies a property from one property list or class to another * * @param dst_id @@ -7269,12 +7958,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native int H5Pcopy_prop(long dst_id, long src_id, String name) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Premove removes a property from a property list * * @param plid @@ -7284,11 +7975,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
*/ public synchronized static native int H5Premove(long plid, String name) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Punregister removes a property from a property list class * * @param plid @@ -7298,11 +7991,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native int H5Punregister(long plid, String name) throws HDF5LibraryException; /** + * @ingroup JH5P + * * Closes an existing property list class * * @param plid @@ -7310,7 +8005,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public static int H5Pclose_class(long plid) throws HDF5LibraryException { @@ -7326,6 +8021,8 @@ public class H5 implements java.io.Serializable { private synchronized static native int _H5Pclose_class(long plid) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pclose terminates access to a property list. * * @param plist @@ -7333,7 +8030,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static int H5Pclose(long plist) throws HDF5LibraryException { @@ -7349,6 +8046,8 @@ public class H5 implements java.io.Serializable { private synchronized static native int _H5Pclose(long plist) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pcopy copies an existing property list to create a new property list. * * @param plist @@ -7357,7 +8056,7 @@ public class H5 implements java.io.Serializable { * @return a property list identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. **/ public static long H5Pcopy(long plist) throws HDF5LibraryException { @@ -7394,6 +8093,8 @@ public class H5 implements java.io.Serializable { // typedef herr_t (*H5P_iterate_t)(hid_t id, const char *name, void *iter_data); /** + * @ingroup JH5P + * * H5Pcreate_class_nocb creates an new property class with no callback functions. * * @param parent_class @@ -7404,7 +8105,7 @@ public class H5 implements java.io.Serializable { * @return a property list identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Pcreate_class_nocb(long parent_class, String name) throws HDF5LibraryException { @@ -7440,6 +8141,8 @@ public class H5 implements java.io.Serializable { // H5P_cls_close_func_t close_data) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pregister2_nocb registers a property list with no callback functions. * * @param plist_class @@ -7452,7 +8155,7 @@ public class H5 implements java.io.Serializable { * IN: Default value of the property * * @exception HDF5LibraryException - * - Error from the HDF-5 Library. + * - Error from the HDF5 Library. **/ public synchronized static native void H5Pregister2_nocb(long plist_class, String name, long size, byte[] def_value) throws HDF5LibraryException; @@ -7463,6 +8166,8 @@ public class H5 implements java.io.Serializable { // H5P_prp_compare_func_cb prp_cmp, H5P_prp_close_func_cb prp_close) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pinsert2_nocb inserts a property list with no callback functions. * * @param plist @@ -7475,7 +8180,7 @@ public class H5 implements java.io.Serializable { * IN: Default value of the property * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native void H5Pinsert2_nocb(long plist, String name, long size, byte[] value) throws HDF5LibraryException; @@ -7486,6 +8191,8 @@ public class H5 implements java.io.Serializable { // H5P_prp_close_func_cb prp_close) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Piterate iterates over the properties in a property list or class * * @param plist @@ -7501,7 +8208,7 @@ public class H5 implements java.io.Serializable { * zero if all properties have been processed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. * @@ -7512,6 +8219,8 @@ public class H5 implements java.io.Serializable { // /////// Object creation property list (OCPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_attr_phase_change retrieves attribute storage phase change thresholds. * * @param ocpl_id @@ -7527,7 +8236,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. * @@ -7536,6 +8245,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_attr_phase_change sets threshold values for attribute storage on an object. These * thresholds determine the point at which attribute storage changes * from compact storage (i.e., storage in the object header) @@ -7549,7 +8260,7 @@ public class H5 implements java.io.Serializable { * IN: Minimum number of attributes to be stored in dense storage (Default: 6) * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native void H5Pset_attr_phase_change(long ocpl_id, int max_compact, @@ -7557,6 +8268,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_attr_creation_order retrieves the settings for tracking and indexing attribute creation order on * an object. * @@ -7566,13 +8279,15 @@ public class H5 implements java.io.Serializable { * @return Flags specifying whether to track and index attribute creation order * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_attr_creation_order(long ocpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_attr_creation_order sets flags specifying whether to track and index attribute creation order on * an object. * @@ -7584,13 +8299,15 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_attr_creation_order(long ocpl_id, int crt_order_flags) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_obj_track_times queries the object creation property list, ocpl_id, to determine whether object * times are being recorded. * @@ -7600,13 +8317,15 @@ public class H5 implements java.io.Serializable { * @return TRUE or FALSE, specifying whether object times are being recorded * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native boolean H5Pget_obj_track_times(long ocpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_obj_track_times sets a property in the object creation property list, ocpl_id, that governs the * recording of times associated with an object. 
* @@ -7617,13 +8336,15 @@ public class H5 implements java.io.Serializable { * IN: TRUE or FALSE, specifying whether object times are to be tracked * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void H5Pset_obj_track_times(long ocpl_id, boolean track_times) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pmodify_filter modifies the specified FILTER in the transient or permanent output filter pipeline * depending on whether PLIST is a dataset creation or dataset * transfer property list. The FLAGS argument specifies certain @@ -7663,7 +8384,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name or an array is null. * @@ -7673,6 +8394,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_filter adds the specified filter and corresponding properties to the end of an output filter * pipeline. * @@ -7690,12 +8413,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_filter(long plist, int filter, int flags, long cd_nelmts, int[] cd_values) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_nfilters returns the number of filters defined in the filter pipeline associated with the * property list plist. * @@ -7705,11 +8430,13 @@ public class H5 implements java.io.Serializable { * @return the number of filters in the pipeline if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Pget_nfilters(long plist) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_filter returns information about a filter, specified by its filter number, in a filter pipeline, * specified by the property list with which it is associated. * @@ -7738,7 +8465,7 @@ public class H5 implements java.io.Serializable { * @exception ArrayStoreException * Fatal error on Copyback * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name or an array is null. * @@ -7752,6 +8479,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5P + * * H5Pget_filter2 returns information about a filter, specified by its filter number, in a filter * pipeline, specified by the property list with which it is associated. * @@ -7766,6 +8495,8 @@ public class H5 implements java.io.Serializable { NullPointerException; /** + * @ingroup JH5P + * * H5Pget_filter_by_id returns information about the filter specified in filter_id, a filter identifier. * plist_id must be a dataset or group creation property list and filter_id must be in the associated * filter pipeline. The filter_id and flags parameters are used in the same manner as described in the @@ -7798,7 +8529,7 @@ public class H5 implements java.io.Serializable { * @return the filter identification number if successful. Otherwise returns H5Z_FILTER_ERROR (-1). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception ArrayIndexOutOfBoundsException * Fatal error on Copyback * @exception ArrayStoreException @@ -7816,6 +8547,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5P + * * H5Pget_filter_by_id2 returns information about a filter, specified by its filter id, in a filter * pipeline, specified by the property list with which it is associated. 
* @@ -7839,7 +8572,7 @@ public class H5 implements java.io.Serializable { * @return the filter identification number if successful. Otherwise returns H5Z_FILTER_ERROR (-1). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name or an array is null. * @@ -7850,6 +8583,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pall_filters_avail query to verify that all the filters set * in the dataset creation property list are available currently. * @@ -7861,12 +8596,14 @@ public class H5 implements java.io.Serializable { * FALSE if one or more filters not currently available. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Pall_filters_avail(long dcpl_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Premove_filter deletes a filter from the dataset creation property list; * deletes all filters if filter is H5Z_FILTER_NONE * @@ -7878,12 +8615,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value and the size of the user block; if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Premove_filter(long obj_id, long filter) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_deflate sets the compression method for a dataset. * * @param plist @@ -7894,11 +8633,13 @@ public class H5 implements java.io.Serializable { * @return non-negative if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Pset_deflate(long plist, int level) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_fletcher32 sets Fletcher32 checksum of EDC for a dataset creation * property list or group creation property list. * @@ -7908,7 +8649,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value and the size of the user block; if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_fletcher32(long plist) throws HDF5LibraryException, NullPointerException; @@ -7916,6 +8657,8 @@ public class H5 implements java.io.Serializable { // /////// File creation property list (FCPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_userblock retrieves the size of a user block in a file creation property list. * * @param plist @@ -7926,7 +8669,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value and the size of the user block; if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. **/ @@ -7934,6 +8677,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_userblock sets the user block size of a file creation property list. * * @param plist @@ -7944,11 +8689,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_userblock(long plist, long size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_sizes retrieves the size of the offsets and lengths used in an HDF5 file. This function is only * valid for file creation property lists. 
* @@ -7964,7 +8711,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value with the sizes initialized; if successful; * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. * @exception IllegalArgumentException @@ -7974,6 +8721,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_sizes sets the byte size of the offsets and lengths used to address objects in an HDF5 file. * * @param plist @@ -7986,12 +8735,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_sizes(long plist, int sizeof_addr, int sizeof_size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_sym_k retrieves the size of the symbol table B-tree 1/2 rank and the symbol table leaf node 1/2 * size. * @@ -8008,7 +8759,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. * @exception IllegalArgumentException @@ -8018,6 +8769,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_sym_k sets the size of parameters used to control the symbol table nodes. * * @param plist @@ -8030,12 +8783,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Pset_sym_k(long plist, int ik, int lk) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_istore_k queries the 1/2 rank of an indexed storage B-tree. * * @param plist @@ -8046,7 +8801,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * ik array is null. **/ @@ -8054,6 +8809,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_istore_k sets the size of the parameter used to control the B-trees for indexing chunked * datasets. * @@ -8065,11 +8822,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_istore_k(long plist, int ik) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_shared_mesg_nindexes retrieves number of shared object header message indexes in file creation * property list. * @@ -8080,13 +8839,15 @@ public class H5 implements java.io.Serializable { * this property list * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_shared_mesg_nindexes(long fcpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_shared_mesg_nindexes sets the number of shared object header message indexes in the specified * file creation property list. * @@ -8099,7 +8860,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* @exception IllegalArgumentException * Invalid value of nindexes * @@ -8108,6 +8869,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_shared_mesg_index Retrieves the configuration settings for a shared message index. * * @param fcpl_id @@ -8125,7 +8888,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * mesg_info is null. * @exception IllegalArgumentException @@ -8137,6 +8900,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_shared_mesg_index Configures the specified shared object header message index * * @param fcpl_id @@ -8151,7 +8916,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid value of nindexes * @@ -8161,6 +8926,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_shared_mesg_phase_change retrieves shared object header message phase change information. * * @param fcpl_id @@ -8177,7 +8944,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. 
* @@ -8186,6 +8953,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_shared_mesg_phase_change sets shared object header message storage phase change thresholds. * * @param fcpl_id @@ -8200,7 +8969,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_list and min_btree. * @@ -8210,6 +8979,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_file_space_strategy sets the file space management strategy for the file associated with fcpl_id * to strategy. There are four strategies that applications can select and they are described in the * Parameters section. @@ -8236,7 +9007,7 @@ public class H5 implements java.io.Serializable { * is not to be modified. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_list and min_btree. * @@ -8246,6 +9017,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_file_space_strategy provides the means for applications to manage the HDF5 file's file space * strategy for their specific needs. * @@ -8259,7 +9032,7 @@ public class H5 implements java.io.Serializable { * @return the current free-space strategy. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_list and min_btree. 
* @@ -8269,6 +9042,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_file_space_strategy_persist provides the means for applications to manage the HDF5 file's file * space strategy for their specific needs. * @@ -8278,7 +9053,7 @@ public class H5 implements java.io.Serializable { * @return the current free-space persistence. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_list and min_btree. * @@ -8287,6 +9062,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_file_space_strategy_threshold provides the means for applications to manage the HDF5 file's file * space strategy for their specific needs. * @@ -8296,7 +9073,7 @@ public class H5 implements java.io.Serializable { * @return the current free-space section threshold. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_list and min_btree. * @@ -8305,6 +9082,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_file_space_page_size retrieves the file space page size for aggregating small metadata or raw * data. * @@ -8315,7 +9094,7 @@ public class H5 implements java.io.Serializable { * * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_list and min_btree. * @@ -8324,6 +9103,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_file_space_page_size Sets the file space page size for paged aggregation. 
* * @param fcpl_id @@ -8332,7 +9113,7 @@ public class H5 implements java.io.Serializable { * @return the current file space page size. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_list and min_btree. * @@ -8343,6 +9124,8 @@ public class H5 implements java.io.Serializable { // /////// File access property list (FAPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_alignment retrieves the current settings for alignment properties from a file access property * list. * @@ -8358,7 +9141,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * alignment array is null. * @exception IllegalArgumentException @@ -8368,6 +9151,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_alignment sets the alignment properties of a file access property list so that any file object * >= THRESHOLD bytes will be aligned on an address which is a multiple of ALIGNMENT. * @@ -8381,12 +9166,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_alignment(long plist, long threshold, long alignment) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_driver returns the identifier of the low-level file driver associated with the file access * property list or data transfer property list plid. 
* @@ -8395,11 +9182,13 @@ public class H5 implements java.io.Serializable { * @return a valid low-level driver identifier if successful; a negative value if failed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. */ public synchronized static native long H5Pget_driver(long plid) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_family_offset gets offset for family driver. * * @param fapl_id @@ -8408,12 +9197,14 @@ public class H5 implements java.io.Serializable { * @return the offset. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native long H5Pget_family_offset(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_family_offset sets the offset for family driver. * * @param fapl_id @@ -8424,13 +9215,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_family_offset(long fapl_id, long offset) throws HDF5LibraryException; /** + * @ingroup JH5P + * * Retrieves the maximum possible number of elements in the meta data cache and the maximum possible * number of bytes and the RDCC_W0 value in the raw data chunk cache. * @@ -8448,7 +9241,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an array is null. **/ @@ -8457,6 +9250,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_cache sets the number of elements (objects) in the meta data cache and the total number of bytes * in the raw data chunk cache. 
* @@ -8474,13 +9269,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_cache(long plist, int mdc_nelmts, long rdcc_nelmts, long rdcc_nbytes, double rdcc_w0) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_mdc_config gets the initial metadata cache configuration contained in a file access property * list. This configuration is used when the file is opened. * @@ -8490,12 +9287,14 @@ public class H5 implements java.io.Serializable { * @return A buffer(H5AC_cache_config_t) for the current metadata cache configuration information * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native H5AC_cache_config_t H5Pget_mdc_config(long plist_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_mdc_config sets the initial metadata cache configuration contained in a file access property * list and loads it into the instance of H5AC_cache_config_t pointed to by the config_ptr parameter. This * configuration is used when the file is opened. @@ -8506,12 +9305,14 @@ public class H5 implements java.io.Serializable { * IN: H5AC_cache_config_t, the initial metadata cache configuration. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Pset_mdc_config(long plist_id, H5AC_cache_config_t config_ptr) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_gc_references Returns the current setting for the garbage collection references property from a * file access property list. * @@ -8521,11 +9322,13 @@ public class H5 implements java.io.Serializable { * @return GC is on (true) or off (false) * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. **/ public synchronized static native boolean H5Pget_gc_references(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_gc_references Sets the flag for garbage collecting references for the file. Default value for * garbage collecting references is off. * @@ -8537,12 +9340,14 @@ public class H5 implements java.io.Serializable { * @return non-negative if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_gc_references(long fapl_id, boolean gc_ref) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_fclose_degree returns the degree for the file close behavior for a file access * property list. * @@ -8552,12 +9357,14 @@ public class H5 implements java.io.Serializable { * @return the degree for the file close behavior * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pget_fclose_degree(long fapl_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fclose_degree sets the degree for the file close behavior. * * @param fapl_id @@ -8568,12 +9375,14 @@ public class H5 implements java.io.Serializable { * @return non-negative if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_fclose_degree(long fapl_id, int degree) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_meta_block_size the current metadata block size setting. * * @param fapl_id @@ -8582,12 +9391,14 @@ public class H5 implements java.io.Serializable { * @return the minimum size, in bytes, of metadata block allocations. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native long H5Pget_meta_block_size(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_meta_block_size sets the minimum metadata block size. * * @param fapl_id @@ -8596,13 +9407,15 @@ public class H5 implements java.io.Serializable { * IN: Minimum size, in bytes, of metadata block allocations. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void H5Pset_meta_block_size(long fapl_id, long size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_sieve_buf_size retrieves the current settings for the data sieve buffer size * property from a file access property list. * @@ -8612,11 +9425,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value and the size of the user block; if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Pget_sieve_buf_size(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_sieve_buf_size Sets the maximum size of the data seive buffer used for file * drivers which are capable of using data sieving. The data sieve * buffer is used when performing I/O on datasets in the file. Using a @@ -8634,12 +9449,14 @@ public class H5 implements java.io.Serializable { * IN: maximum size of the data seive buffer. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Pset_sieve_buf_size(long fapl_id, long size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_small_data_block_size retrieves the size of a block of small data in a file creation property * list. 
* @@ -8649,12 +9466,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value and the size of the user block; if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Pget_small_data_block_size(long plist) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_small_data_block_size reserves blocks of size bytes for the contiguous storage of the raw data * portion of small datasets. * @@ -8666,12 +9485,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_small_data_block_size(long plist, long size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_libver_bounds retrieves the lower and upper bounds on the HDF5 Library versions that indirectly * determine the object formats versions used when creating objects in the file. * @@ -8688,7 +9509,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. * @@ -8697,6 +9518,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_libver_bounds Sets bounds on library versions, and indirectly format versions, to be used when * creating objects * @@ -8711,7 +9534,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* @exception IllegalArgumentException * Argument is Illegal * @@ -8720,6 +9543,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_elink_file_cache_size retrieves the size of the external link open file cache. * * @param fapl_id @@ -8728,13 +9553,15 @@ public class H5 implements java.io.Serializable { * @return External link open file cache size in number of files. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_elink_file_cache_size(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_elink_file_cache_size sets the number of files that can be held open in an external link open * file cache. * @@ -8744,13 +9571,15 @@ public class H5 implements java.io.Serializable { * IN: External link open file cache size in number of files. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void H5Pset_elink_file_cache_size(long fapl_id, int efc_size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_mdc_log_options sets metadata cache logging options. * * @param fapl_id @@ -8763,7 +9592,7 @@ public class H5 implements java.io.Serializable { * IN: Whether the logging begins as soon as the file is opened or created. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * location is null. * @@ -8773,6 +9602,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_mdc_log_options gets metadata cache logging options. 
* * @param fapl_id @@ -8786,13 +9617,15 @@ public class H5 implements java.io.Serializable { * @return the location of log in UTF-8/ASCII (file path/name) (On Windows, this must be ASCII). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native String H5Pget_mdc_log_options(long fapl_id, boolean[] mdc_log_options) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_metadata_read_attempts retrieves the number of read attempts that is set in the file access * property list plist_id. * @@ -8802,13 +9635,15 @@ public class H5 implements java.io.Serializable { * @return The number of read attempts. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native long H5Pget_metadata_read_attempts(long plist_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_metadata_read_attempts sets the number of reads that the library will try when reading * checksummed metadata in an HDF5 file opened with SWMR access. When reading such metadata, the library * will compare the checksum computed for the metadata just read with the checksum stored within the piece @@ -8823,13 +9658,15 @@ public class H5 implements java.io.Serializable { * IN: The number of read attempts which is a value greater than 0. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void H5Pset_metadata_read_attempts(long plist_id, long attempts) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_evict_on_close retrieves the file access property list setting that determines whether an HDF5 * object will be evicted from the library's metadata cache when it is closed. * @@ -8839,12 +9676,14 @@ public class H5 implements java.io.Serializable { * @return indication if the object will be evicted on close. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native boolean H5Pget_evict_on_close(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_evict_on_close controls the library's behavior of evicting metadata associated with a closed * object. * @@ -8854,13 +9693,15 @@ public class H5 implements java.io.Serializable { * IN: Whether the HDF5 object should be evicted on close. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void H5Pset_evict_on_close(long fapl_id, boolean evict_on_close) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_use_file_locking retrieves whether we are using file locking. * * @param fapl_id @@ -8869,13 +9710,15 @@ public class H5 implements java.io.Serializable { * @return indication if file locking is used. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native boolean H5Pget_use_file_locking(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_use_file_locking retrieves whether we ignore file locks when they are disabled. * * @param fapl_id @@ -8884,13 +9727,15 @@ public class H5 implements java.io.Serializable { * @return indication if file locking is ignored. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native boolean H5Pget_ignore_disabled_file_locking(long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_file_locking sets parameters related to file locking. * * @param fapl_id @@ -8904,7 +9749,7 @@ public class H5 implements java.io.Serializable { * IN: Whether file locking will be ignored when disabled on a file system (useful for Lustre). 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void H5Pset_file_locking(long fapl_id, boolean use_file_locking, @@ -8919,6 +9764,8 @@ public class H5 implements java.io.Serializable { // Dataset creation property list (DCPL) routines // /** + * @ingroup JH5P + * * H5Pget_layout returns the layout of the raw data for a dataset. * * @param plist @@ -8928,11 +9775,13 @@ public class H5 implements java.io.Serializable { * H5D_LAYOUT_ERROR (-1). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pget_layout(long plist) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_layout sets the type of storage used store the raw data for a dataset. * * @param plist @@ -8943,11 +9792,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_layout(long plist, int layout) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_chunk retrieves the size of chunks for the raw data of a chunked layout dataset. * * @param plist @@ -8960,7 +9811,7 @@ public class H5 implements java.io.Serializable { * @return chunk dimensionality successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * dims array is null. * @exception IllegalArgumentException @@ -8970,6 +9821,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_chunk sets the size of the chunks used to store a chunked layout dataset. 
* * @param plist @@ -8982,7 +9835,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * dims array is null. * @exception IllegalArgumentException @@ -8992,6 +9845,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_chunk sets the size of the chunks used to store a chunked layout dataset. * * @param plist @@ -9004,7 +9859,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5Exception - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * dims array is null. * @exception IllegalArgumentException @@ -9028,6 +9883,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5P + * * H5Pset_virtual maps elements of the virtual dataset (VDS) described by the * virtual dataspace identifier vspace_id to the elements of the source dataset * described by the source dataset dataspace identifier src_space_id. The source @@ -9051,7 +9908,7 @@ public class H5 implements java.io.Serializable { * selection. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an name string is null. * @exception IllegalArgumentException @@ -9062,6 +9919,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_virtual_count gets the number of mappings for a virtual dataset that has the creation property * list specified by dcpl_id. 
* @@ -9071,7 +9930,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative number of mappings if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * An id is <=0 **/ @@ -9079,6 +9938,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_virtual_vspace takes the dataset creation property list for the virtual dataset, dcpl_id, and * the mapping index, index, and returns a dataspace identifier for the selection within the virtual * dataset used in the mapping. @@ -9091,7 +9952,7 @@ public class H5 implements java.io.Serializable { * @return a valid dataspace identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * An id is <=0 **/ @@ -9099,6 +9960,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_virtual_srcspace takes the dataset creation property list for the virtual dataset, dcpl_id, and * the mapping index, index, and returns a dataspace identifier for the selection within the source * dataset used in the mapping. @@ -9111,7 +9974,7 @@ public class H5 implements java.io.Serializable { * @return a valid dataspace identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* @exception IllegalArgumentException * An id is <=0 **/ @@ -9119,6 +9982,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_virtual_filename takes the dataset creation property list for the virtual dataset, dcpl_id, the * mapping index, index, the size of the filename for a source dataset, size, and retrieves the name of * the file for a source dataset used in the mapping. @@ -9131,7 +9996,7 @@ public class H5 implements java.io.Serializable { * @return the name of the file containing the source dataset if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * An id is <=0 **/ @@ -9139,6 +10004,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_virtual_dsetname takes the dataset creation property list for the virtual dataset, dcpl_id, the * mapping index, index, the size of the dataset name for a source dataset, size, and retrieves the name * of the source dataset used in the mapping. @@ -9151,7 +10018,7 @@ public class H5 implements java.io.Serializable { * @return the name of the source dataset if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * An id is <=0 **/ @@ -9168,7 +10035,7 @@ public class H5 implements java.io.Serializable { // * @return VDS link open file cache size in number of files. // * // * @exception HDF5LibraryException - // * Error from the HDF-5 Library. + // * Error from the HDF5 Library. // * // **/ // public synchronized static native int H5Pget_vds_file_cache_size(long fapl_id) throws @@ -9184,13 +10051,15 @@ public class H5 implements java.io.Serializable { // * IN: VDS link open file cache size in number of files. 
// * // * @exception HDF5LibraryException - // * Error from the HDF-5 Library. + // * Error from the HDF5 Library. // * // **/ // public synchronized static native void H5Pset_vds_file_cache_size(long fapl_id, int efc_size) // throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_external returns information about an external file. * * @param plist @@ -9217,7 +10086,7 @@ public class H5 implements java.io.Serializable { * @exception ArrayStoreException * Fatal error on Copyback * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name or size is null. * @exception IllegalArgumentException @@ -9230,6 +10099,8 @@ public class H5 implements java.io.Serializable { NullPointerException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_external adds an external file to the list of external files. * * @param plist @@ -9245,7 +10116,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -9253,6 +10124,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_external_count returns the number of external files for the specified dataset. * * @param plist @@ -9261,11 +10134,13 @@ public class H5 implements java.io.Serializable { * @return the number of external files if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pget_external_count(long plist) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_szip Sets up the use of the szip filter. 
* * @param plist @@ -9278,13 +10153,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_szip(long plist, int options_mask, int pixels_per_block) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_shuffle Sets up the use of the shuffle filter. * * @param plist_id @@ -9293,13 +10170,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_shuffle(long plist_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_nbit Sets up the use of the N-Bit filter. * * @param plist_id @@ -9308,12 +10187,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_nbit(long plist_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_scaleoffset sets the Scale-Offset filter for a dataset. * * @param plist_id @@ -9326,7 +10207,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* @exception IllegalArgumentException * Invalid arguments * @@ -9335,6 +10216,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_fill_value queries the fill value property of a dataset creation property list. * * @param plist_id @@ -9353,6 +10236,8 @@ public class H5 implements java.io.Serializable { throws HDF5Exception; /** + * @ingroup JH5P + * * H5Pget_fill_value queries the fill value property of a dataset creation property list. * * @param plist_id @@ -9381,6 +10266,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5P + * * H5Pset_fill_value sets the fill value for a dataset creation property list. * * @param plist_id @@ -9399,6 +10286,8 @@ public class H5 implements java.io.Serializable { throws HDF5Exception; /** + * @ingroup JH5P + * * H5Pset_fill_value sets the fill value for a dataset creation property list. * * @param plist_id @@ -9427,6 +10316,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5P + * * H5Pset_fill_value checks if the fill value is defined for a dataset creation property list. * * @param plist_id @@ -9447,6 +10338,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_alloc_time Gets space allocation time for dataset during creation. * * @param plist_id @@ -9457,13 +10350,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_alloc_time(long plist_id, int[] alloc_time) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_alloc_time Sets space allocation time for dataset during creation. 
* * @param plist_id @@ -9474,13 +10369,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_alloc_time(long plist_id, int alloc_time) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fill_time Gets fill value writing time. * * @param plist_id @@ -9491,13 +10388,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_fill_time(long plist_id, int[] fill_time) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_fill_time Sets the fill value writing time. * * @param plist_id @@ -9508,13 +10407,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_fill_time(long plist_id, int fill_time) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_chunk_opts Sets the edge chunk option in a dataset creation property list. * * @param dcpl_id @@ -9525,12 +10426,14 @@ public class H5 implements java.io.Serializable { * 0 - Disables option; partial edge chunks will be compressed. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library + * Error from the HDF5 Library **/ public synchronized static native void H5Pset_chunk_opts(long dcpl_id, int opts) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_chunk_opts retrieves the edge chunk option setting stored in the dataset creation property list * * @param dcpl_id @@ -9539,12 +10442,14 @@ public class H5 implements java.io.Serializable { * @return The edge chunk option setting. * * @exception HDF5LibraryException - * Error from the HDF-5 Library + * Error from the HDF5 Library * */ public synchronized static native int H5Pget_chunk_opts(long dcpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_dset_no_attrs_hint accesses the flag for whether or not datasets created by the given dcpl * will be created with a "minimized" object header. * @@ -9554,12 +10459,14 @@ public class H5 implements java.io.Serializable { * @return true if the given dcpl is set to create minimized dataset object headers, false if not. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Pget_dset_no_attrs_hint(long dcpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_dset_no_attrs_hint sets the dcpl to minimize (or explicitly to not minimized) dataset object * headers upon creation. * @@ -9570,7 +10477,7 @@ public class H5 implements java.io.Serializable { * IN: the minimize hint setting * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native void H5Pset_dset_no_attrs_hint(long dcpl_id, boolean minimize) throws HDF5LibraryException; @@ -9578,6 +10485,8 @@ public class H5 implements java.io.Serializable { // /////// Dataset access property list (DAPL) routines /////// /** + * @ingroup JH5P + * * Retrieves the maximum possible number of elements in the meta data cache and the maximum possible * number of bytes and the RDCC_W0 value in the raw data chunk cache on a per-datset basis. * @@ -9591,7 +10500,7 @@ public class H5 implements java.io.Serializable { * IN/OUT: Preemption policy. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an array is null. **/ @@ -9600,6 +10509,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_chunk_cache sets the number of elements (objects) in the meta data cache and the total number of * bytes in the raw data chunk cache on a per-datset basis. * @@ -9613,13 +10524,15 @@ public class H5 implements java.io.Serializable { * IN: Preemption policy. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Pset_chunk_cache(long dapl_id, long rdcc_nslots, long rdcc_nbytes, double rdcc_w0) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_virtual_view takes the access property list for the virtual dataset, dapl_id, and the flag, * view, and sets the VDS view according to the flag value. * @@ -9629,12 +10542,14 @@ public class H5 implements java.io.Serializable { * IN: Flag specifying the extent of the data to be included in the view. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library + * Error from the HDF5 Library **/ public synchronized static native void H5Pset_virtual_view(long dapl_id, int view) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_virtual_view takes the virtual dataset access property list, dapl_id, and retrieves the flag, * view, set by the H5Pset_virtual_view call. * @@ -9644,12 +10559,14 @@ public class H5 implements java.io.Serializable { * @return The flag specifying the view of the virtual dataset. * * @exception HDF5LibraryException - * Error from the HDF-5 Library + * Error from the HDF5 Library * */ public synchronized static native int H5Pget_virtual_view(long dapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_virtual_printf_gap sets the access property list for the virtual dataset, dapl_id, to instruct * the library to stop looking for the mapped data stored in the files and/or datasets with the * printf-style names after not finding gap_size files and/or datasets. The found source files and @@ -9662,12 +10579,14 @@ public class H5 implements java.io.Serializable { * the extent of an unlimited virtual dataset with printf-style mappings. * * @exception HDF5LibraryException - * Error from the HDF-5 Library + * Error from the HDF5 Library **/ public synchronized static native void H5Pset_virtual_printf_gap(long dapl_id, long gap_size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_virtual_printf_gap returns the maximum number of missing printf-style files and/or datasets for * determining the extent of an unlimited virtual dataaset, gap_size, using the access property list for * the virtual dataset, dapl_id. @@ -9679,13 +10598,15 @@ public class H5 implements java.io.Serializable { * the extent of an unlimited virtual dataset with printf-style mappings. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library + * Error from the HDF5 Library * */ public synchronized static native long H5Pget_virtual_printf_gap(long dapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_virtual_prefix Retrieves prefix applied to virtual file paths. * * @param dapl_id @@ -9694,12 +10615,14 @@ public class H5 implements java.io.Serializable { * @return the prefix to be applied to virtual file paths. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native String H5Pget_virtual_prefix(long dapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_virtual_prefix Sets prefix to be applied to virtual file paths. * * @param dapl_id @@ -9708,7 +10631,7 @@ public class H5 implements java.io.Serializable { * IN: Prefix to be applied to virtual file paths * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * prefix is null. * @@ -9717,6 +10640,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_efile_prefix Retrieves prefix applied to external file paths. * * @param dapl_id @@ -9725,12 +10650,14 @@ public class H5 implements java.io.Serializable { * @return the prefix to be applied to external file paths. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native String H5Pget_efile_prefix(long dapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_efile_prefix Sets prefix to be applied to external file paths. * * @param dapl_id @@ -9739,7 +10666,7 @@ public class H5 implements java.io.Serializable { * IN: Prefix to be applied to external file paths * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. * @exception NullPointerException * prefix is null. * @@ -9756,6 +10683,8 @@ public class H5 implements java.io.Serializable { // /////// Dataset xfer property list (DXPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_data_transform retrieves the data transform expression previously set in the dataset transfer * property list plist_id by H5Pset_data_transform. * @@ -9771,7 +10700,7 @@ public class H5 implements java.io.Serializable { * * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Size is <= 0. * @@ -9781,6 +10710,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_data_transform sets a data transform expression * * @param plist_id @@ -9791,7 +10722,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * expression is null. * @@ -9800,7 +10731,9 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** - * HH5Pget_buffer gets type conversion and background buffers. Returns buffer size, in bytes, if + * @ingroup JH5P + * + * H5Pget_buffer gets type conversion and background buffers. Returns buffer size, in bytes, if * successful; otherwise 0 on failure. * * @param plist @@ -9813,7 +10746,7 @@ public class H5 implements java.io.Serializable { * @return buffer size, in bytes, if successful; otherwise 0 on failure * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * plist is invalid. 
**/ @@ -9821,6 +10754,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_buffer_size gets type conversion and background buffer size, in bytes, if successful; * otherwise 0 on failure. * @@ -9830,7 +10765,7 @@ public class H5 implements java.io.Serializable { * @return buffer size, in bytes, if successful; otherwise 0 on failure * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * plist is invalid. **/ @@ -9838,6 +10773,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pset_buffer sets type conversion and background buffers. status to TRUE or FALSE. * * Given a dataset transfer property list, H5Pset_buffer sets the maximum size for the type conversion @@ -9858,7 +10795,7 @@ public class H5 implements java.io.Serializable { * Size, in bytes, of the type conversion and background buffers. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * plist is invalid. **/ @@ -9866,6 +10803,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_edc_check gets the error-detecting algorithm in use. * * @param plist @@ -9874,11 +10813,13 @@ public class H5 implements java.io.Serializable { * @return the error-detecting algorithm * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pget_edc_check(long plist) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_edc_check sets the error-detecting algorithm. 
* * @param plist @@ -9889,11 +10830,13 @@ public class H5 implements java.io.Serializable { * @return non-negative if succeed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_edc_check(long plist, int check) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_btree_ratio Get the B-tree split ratios for a dataset transfer property list. * * @param plist_id @@ -9908,7 +10851,7 @@ public class H5 implements java.io.Serializable { * @return non-negative if succeed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. **/ @@ -9917,6 +10860,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_btree_ratio Sets B-tree split ratios for a dataset transfer property list. The split ratios * determine what percent of children go in the first node when a node splits. * @@ -9932,12 +10877,14 @@ public class H5 implements java.io.Serializable { * @return non-negative if succeed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Pset_btree_ratios(long plist_id, double left, double middle, double right) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_hyper_vector_size reads values previously set with H5Pset_hyper_vector_size. * * @param dxpl_id @@ -9948,13 +10895,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native int H5Pget_hyper_vector_size(long dxpl_id, long[] vector_size) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_hyper_vector_size sets the number of * "I/O vectors" (offset and length pairs) which are to be * accumulated in memory before being issued to the lower levels @@ -9974,7 +10923,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_hyper_vector_size(long dxpl_id, long vector_size) @@ -9983,6 +10932,8 @@ public class H5 implements java.io.Serializable { // /////// Link creation property list (LCPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_create_intermediate_group determines whether property is set to enable creating missing * intermediate groups. * @@ -9992,13 +10943,15 @@ public class H5 implements java.io.Serializable { * @return Boolean true or false * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native boolean H5Pget_create_intermediate_group(long lcpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_create_intermediate_group specifies in property list whether to create missing intermediate * groups * @@ -10010,7 +10963,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native int H5Pset_create_intermediate_group(long lcpl_id, @@ -10020,6 +10973,8 @@ public class H5 implements java.io.Serializable { // /////// Group creation property list (GCPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_local_heap_size_hint Retrieves the anticipated size of the local heap for original-style groups. * * @param gcpl_id @@ -10028,13 +10983,15 @@ public class H5 implements java.io.Serializable { * @return size_hint, the anticipated size of local heap * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native long H5Pget_local_heap_size_hint(long gcpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_local_heap_size_hint Specifies the anticipated maximum size of a local heap. * * @param gcpl_id @@ -10045,13 +11002,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_local_heap_size_hint(long gcpl_id, long size_hint) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_link_phase_change Queries the settings for conversion between compact and dense groups. * * @param gcpl_id @@ -10068,7 +11027,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * size is null. * @@ -10077,6 +11036,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_link_phase_change Sets the parameters for conversion between compact and dense groups. 
* * @param gcpl_id @@ -10089,7 +11050,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values of max_compact and min_dense. * @@ -10099,6 +11060,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_est_link_info Queries data required to estimate required local heap or object header size. * * @param gcpl_id @@ -10115,7 +11078,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * link_info is null. * @@ -10124,6 +11087,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_est_link_info Sets estimated number of links and length of link names in a group. * * @param gcpl_id @@ -10136,7 +11101,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid values to est_num_entries and est_name_len. * @@ -10146,6 +11111,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_link_creation_order queries the group creation property list, gcpl_id, and returns a flag * indicating whether link creation order is tracked and/or indexed in a group. 
* @@ -10155,13 +11122,15 @@ public class H5 implements java.io.Serializable { * @return crt_order_flags -Creation order flag(s) * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_link_creation_order(long gcpl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_link_creation_order Sets flags in a group creation property list, gcpl_id, for tracking and/or * indexing links on creation order. * @@ -10174,7 +11143,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_link_creation_order(long gcpl_id, int crt_order_flags) @@ -10183,6 +11152,8 @@ public class H5 implements java.io.Serializable { // /////// String creation property list (STRCPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_char_encoding gets the character encoding of the string. * * @param plist_id @@ -10191,12 +11162,14 @@ public class H5 implements java.io.Serializable { * @return Returns the character encoding of the string. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_char_encoding(long plist_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_char_encoding sets the character encoding of the string. * * @param plist_id @@ -10205,7 +11178,7 @@ public class H5 implements java.io.Serializable { * IN: the character encoding of the string * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native void H5Pset_char_encoding(long plist_id, int encoding) @@ -10214,6 +11187,8 @@ public class H5 implements java.io.Serializable { // /////// Link access property list (LAPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_nlinks retrieves the maximum number of soft or user-defined link traversals allowed, nlinks, * before the library assumes it has found a cycle and aborts the traversal. This value is retrieved from * the link access property list lapl_id. @@ -10224,12 +11199,14 @@ public class H5 implements java.io.Serializable { * @return Returns a Maximum number of links to traverse. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native long H5Pget_nlinks(long lapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_nlinks sets the maximum number of soft or user-defined link traversals allowed, nlinks, before * the library assumes it has found a cycle and aborts the traversal. This value is set in the link access * property list lapl_id. @@ -10242,7 +11219,7 @@ public class H5 implements java.io.Serializable { * @return Returns a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Argument is Illegal * @@ -10251,6 +11228,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, IllegalArgumentException; /** + * @ingroup JH5P + * * H5Pget_elink_prefix Retrieves prefix applied to external link paths. * * @param lapl_id @@ -10262,7 +11241,7 @@ public class H5 implements java.io.Serializable { * the NULL terminator; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * prefix is null. 
* @@ -10271,6 +11250,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_elink_prefix Sets prefix to be applied to external link paths. * * @param lapl_id @@ -10281,7 +11262,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * prefix is null. * @@ -10290,6 +11271,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_elink_fapl Retrieves the file access property list identifier associated with the link access * property list. * @@ -10299,7 +11282,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public static long H5Pget_elink_fapl(long lapl_id) throws HDF5LibraryException @@ -10316,6 +11299,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Pget_elink_fapl(long lapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_elink_fapl sets a file access property list for use in accessing a file pointed to by an * external link. * @@ -10327,13 +11312,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native int H5Pset_elink_fapl(long lapl_id, long fapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_elink_acc_flags retrieves the external link traversal file access flag from the specified link * access property list. * @@ -10343,12 +11330,14 @@ public class H5 implements java.io.Serializable { * @return File access flag for link traversal. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_elink_acc_flags(long lapl_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_elink_acc_flags Sets the external link traversal file access flag in a link access property * list. * @@ -10360,7 +11349,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception IllegalArgumentException * Invalid Flag values. * @@ -10371,6 +11360,8 @@ public class H5 implements java.io.Serializable { // /////// Object copy property list (OCPYPL) routines /////// /** + * @ingroup JH5P + * * H5Pget_copy_object retrieves the properties to be used when an object is copied. * * @param ocp_plist_id @@ -10379,12 +11370,14 @@ public class H5 implements java.io.Serializable { * @return Copy option(s) set in the object copy property list * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_copy_object(long ocp_plist_id) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_copy_object Sets properties to be used when an object is copied. * * @param ocp_plist_id @@ -10393,7 +11386,7 @@ public class H5 implements java.io.Serializable { * IN: Copy option(s) to be set * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. * **/ public synchronized static native void H5Pset_copy_object(long ocp_plist_id, int copy_options) @@ -10402,6 +11395,8 @@ public class H5 implements java.io.Serializable { // /////// file drivers property list routines /////// /** + * @ingroup JH5P + * * H5Pget_fapl_core retrieve H5FD_CORE I/O settings. * * @param fapl_id @@ -10412,7 +11407,7 @@ public class H5 implements java.io.Serializable { * OUT: write to file name on flush setting * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void H5Pget_fapl_core(long fapl_id, long[] increment, @@ -10420,6 +11415,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_core modifies the file access property list to use the H5FD_CORE driver. * * @param fapl_id @@ -10432,7 +11429,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_fapl_core(long fapl_id, long increment, @@ -10440,6 +11437,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_fapl_direct queries properties set by the H5Pset_fapl_direct. * * @param fapl_id @@ -10452,13 +11451,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_fapl_direct(long fapl_id, long[] info) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pset_fapl_direct Sets up use of the direct I/O driver. 
* * @param fapl_id @@ -10473,13 +11474,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_fapl_direct(long fapl_id, long alignment, long block_size, long cbuf_size) throws HDF5LibraryException; /** + * @ingroup JH5P + * * H5Pget_fapl_family Returns information about the family file access property list. * * @param fapl_id @@ -10492,7 +11495,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pget_fapl_family(long fapl_id, long[] memb_size, @@ -10500,6 +11503,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_family Sets up use of the direct I/O driver. * * @param fapl_id @@ -10512,13 +11517,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_fapl_family(long fapl_id, long memb_size, long memb_fapl_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_hdfs Modify the file access property list to use the H5FD_HDFS driver. * * @param fapl_id @@ -10529,13 +11536,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native int H5Pset_fapl_hdfs(long fapl_id, H5FD_hdfs_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_fapl_hdfs gets the properties hdfs I/O driver. * * @param fapl_id @@ -10544,13 +11553,15 @@ public class H5 implements java.io.Serializable { * @return the properties of the hdfs driver. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native H5FD_hdfs_fapl_t H5Pget_fapl_hdfs(long fapl_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_fapl_multi Sets up use of the multi I/O driver. * * @param fapl_id @@ -10568,7 +11579,7 @@ public class H5 implements java.io.Serializable { * @return a boolean value; Allows read-only access to incomplete file sets when TRUE. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an array is null. * @@ -10579,6 +11590,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_multi Sets up use of the multi I/O driver. * * @param fapl_id @@ -10596,7 +11609,7 @@ public class H5 implements java.io.Serializable { * IN: Allows read-only access to incomplete file sets when TRUE. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an array is null. * @@ -10607,6 +11620,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_log Sets up the logging virtual file driver (H5FD_LOG) for use. H5Pset_fapl_log modifies * the file access property list to use the logging driver, H5FD_LOG. 
The logging virtual file driver * (VFD) is a clone of the standard SEC2 (H5FD_SEC2) driver with additional facilities for logging VFD @@ -10622,7 +11637,7 @@ public class H5 implements java.io.Serializable { * IN: The size of the logging buffers, in bytes. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * logfile is null. **/ @@ -10631,6 +11646,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_sec2 Sets up use of the sec2 I/O driver. * * @param fapl_id @@ -10639,13 +11656,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_fapl_sec2(long fapl_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_split Sets up use of the split I/O driver. Makes the multi driver act like the * old split driver which stored meta data in one file and raw * data in another file @@ -10662,7 +11681,7 @@ public class H5 implements java.io.Serializable { * IN: File access property list identifier raw data * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native void @@ -10670,6 +11689,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_stdio Sets up use of the stdio I/O driver. * * @param fapl_id @@ -10678,13 +11699,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
* **/ public synchronized static native int H5Pset_fapl_stdio(long fapl_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_windows Sets up use of the windows I/O driver. * * @param fapl_id @@ -10693,13 +11716,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_fapl_windows(long fapl_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pset_fapl_ros3 Modify the file access property list to use the H5FD_ROS3 driver. * * @param fapl_id @@ -10710,13 +11735,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful; otherwise returns a negative value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native int H5Pset_fapl_ros3(long fapl_id, H5FD_ros3_fapl_t fapl_conf) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5P + * * H5Pget_fapl_ros3 gets the properties of the ros3 I/O driver. * * @param fapl_id @@ -10725,7 +11752,7 @@ public class H5 implements java.io.Serializable { * @return the properties of the ros3 driver. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * **/ public synchronized static native H5FD_ros3_fapl_t H5Pget_fapl_ros3(long fapl_id) @@ -10813,6 +11840,16 @@ public class H5 implements java.io.Serializable { // // // //////////////////////////////////////////////////////////// /** + * @defgroup JH5PL Java Plugin (H5PL) Interface + * + * @see H5PL, C-API + * + * @see @ref H5PL_UG, User Guide + **/ + + /** + * @ingroup JH5PL + * * H5PLset_loading_state uses one argument to enable or disable individual plugins. 
* The plugin_flags parameter is an encoded integer in which each bit controls a specific * plugin or class of plugins. @@ -10834,12 +11871,14 @@ public class H5 implements java.io.Serializable { * * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5PLset_loading_state(int plugin_flags) throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLget_loading_state retrieves the state of the dynamic plugins flag, plugin_flags.. * * @return the list of dynamic plugin types that are enabled or disabled. @@ -10849,33 +11888,39 @@ public class H5 implements java.io.Serializable { * If the value of plugin_flags is 0 (zero), all dynamic plugins are disabled. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5PLget_loading_state() throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLappend inserts the plugin path at the end of the table. * * @param plugin_path * IN: Path for location of filter plugin libraries. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5PLappend(String plugin_path) throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLprepend inserts the plugin path at the beginning of the table. * * @param plugin_path * IN: Path for location of filter plugin libraries. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5PLprepend(String plugin_path) throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLreplace replaces the plugin path at the specified index. * * @param plugin_path @@ -10884,12 +11929,14 @@ public class H5 implements java.io.Serializable { * IN: The table index (0-based). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. **/ public synchronized static native void H5PLreplace(String plugin_path, int index) throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLinsert inserts the plugin path at the specified index. * * @param plugin_path @@ -10898,23 +11945,27 @@ public class H5 implements java.io.Serializable { * IN: The table index (0-based). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5PLinsert(String plugin_path, int index) throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLremove removes the plugin path at the specified index. * * @param index * IN: The table index (0-based). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5PLremove(int index) throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLget retrieves the plugin path at the specified index. * * @param index @@ -10923,29 +11974,256 @@ public class H5 implements java.io.Serializable { * @return the current path at the index in plugin path table * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native String H5PLget(int index) throws HDF5LibraryException; /** + * @ingroup JH5PL + * * H5PLsize retrieves the size of the current list of plugin paths. * * @return the current number of paths in the plugin path table * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5PLsize() throws HDF5LibraryException; // //////////////////////////////////////////////////////////// // // - // H5R: HDF5 1.12 Reference API Functions // + // H5R: HDF5 1.8 Reference API Functions // // // // //////////////////////////////////////////////////////////// - // Constructors // + /** + * @defgroup JH5R Java Reference (H5R) Interface + * + * @see H5R, C-API + * + * @see @ref H5R_UG, User Guide + **/ + + private synchronized static native int H5Rcreate(byte[] ref, long loc_id, String name, int ref_type, + long space_id) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException; + + /** + * @ingroup JH5R + * + * H5Rcreate creates the reference, ref, of the type specified in ref_type, pointing to the object name + * located at loc_id. + * + * @param loc_id + * IN: Location identifier used to locate the object being pointed to. + * @param name + * IN: Name of object at location loc_id. + * @param ref_type + * IN: Type of reference. + * @param space_id + * IN: Dataspace identifier with selection. + * + * @return the reference (byte[]) if successful + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * an input array is null. + * @exception IllegalArgumentException + * an input array is invalid. + **/ + public synchronized static byte[] H5Rcreate(long loc_id, String name, int ref_type, long space_id) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException + { + /* These sizes are correct for HDF5.1.2 */ + int ref_size = 8; + if (ref_type == HDF5Constants.H5R_DATASET_REGION) + ref_size = 12; + + byte rbuf[] = new byte[ref_size]; + + /* will raise an exception if fails */ + H5Rcreate(rbuf, loc_id, name, ref_type, space_id); + + return rbuf; + } /** + * @ingroup JH5R + * + * Given a reference to some object, H5Rdereference opens that object and return an identifier. 
+ * + * @param dataset + * IN: Dataset containing reference object. + * @param access_list + * IN: Property list of the object being referenced. + * @param ref_type + * IN: The reference type of ref. + * @param ref + * IN: reference to an object + * + * @return valid identifier if successful + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * output array is null. + * @exception IllegalArgumentException + * output array is invalid. + **/ + public static long H5Rdereference(long dataset, long access_list, int ref_type, byte[] ref) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException + { + long id = _H5Rdereference(dataset, access_list, ref_type, ref); + if (id > 0) { + log.trace("OPEN_IDS: H5Rdereference add {}", id); + OPEN_IDS.add(id); + log.trace("OPEN_IDS: {}", OPEN_IDS.size()); + } + return id; + } + + private synchronized static native long _H5Rdereference(long dataset, long access_list, int ref_type, + byte[] ref) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException; + + /** + * @ingroup JH5R + * + * H5Rget_name retrieves a name for the object identified by ref. + * + * @param loc_id + * IN: Identifier for the dataset containing the reference or for the group that dataset is in. + * @param ref_type + * IN: Type of reference. + * @param ref + * IN: An object or dataset region reference. + * @param name + * OUT: A name associated with the referenced object or dataset region. + * @param size + * IN: The size of the name buffer. + * + * @return Returns the length of the name if successful, returning 0 (zero) if no name is associated with + * the identifier. Otherwise returns a negative value. + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * size is null. + * @exception IllegalArgumentException + * Argument is illegal. 
+ **/ + public synchronized static native long H5Rget_name(long loc_id, int ref_type, byte[] ref, String[] name, + long size) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException; + + /** + * @ingroup JH5R + * + * H5Rget_name_string retrieves a name for the object identified by ref. + * + * @param loc_id + * IN: Identifier for the dataset containing the reference or for the group that dataset is in. + * @param ref_type + * IN: Type of reference. + * @param ref + * IN: An object or dataset region reference. + * + * @return Returns the name if successful, returning null if no name is associated with + * the identifier. + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * size is null. + * @exception IllegalArgumentException + * Argument is illegal. + **/ + public synchronized static native String H5Rget_name_string(long loc_id, int ref_type, byte[] ref) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException; + + /** + * @ingroup JH5R + * + * H5Rget_obj_type Given a reference to an object ref, H5Rget_obj_type returns the type of the object + * pointed to. + * + * @param loc_id + * IN: loc_id of the reference object. + * @param ref_type + * IN: Type of reference to query. + * @param ref + * IN: the reference + * + * @return Returns the object type + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * an input array is null. + * @exception IllegalArgumentException + * an input array is invalid. + **/ + public synchronized static native int H5Rget_obj_type(long loc_id, int ref_type, byte ref[]) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException; + + /** + * @ingroup JH5R + * + * H5Rget_obj_type2 Retrieves the type of object that an object reference points to. 
+ * + * @see public static int H5Rget_obj_type(int loc_id, int ref_type, byte ref[]) + **/ + private synchronized static native int H5Rget_obj_type2(long loc_id, int ref_type, byte ref[], + int[] obj_type) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException; + + /** + * @ingroup JH5R + * + * Given a reference to an object ref, H5Rget_region creates a copy of the dataspace of the dataset + * pointed to and defines a selection in the copy which is the region pointed to. + * + * @param loc_id + * IN: loc_id of the reference object. + * @param ref_type + * IN: The reference type of ref. + * @param ref + * OUT: the reference to the object and region + * + * @return a valid identifier if successful + * + * @exception HDF5LibraryException + * Error from the HDF5 Library. + * @exception NullPointerException + * an input array is null. + * @exception IllegalArgumentException + * an input array is invalid. + **/ + public static long H5Rget_region(long loc_id, int ref_type, byte[] ref) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException + { + long id = _H5Rget_region(loc_id, ref_type, ref); + if (id > 0) { + log.trace("OPEN_IDS: H5Rget_region add {}", id); + OPEN_IDS.add(id); + log.trace("OPEN_IDS: {}", OPEN_IDS.size()); + } + return id; + } + + private synchronized static native long _H5Rget_region(long loc_id, int ref_type, byte[] ref) + throws HDF5LibraryException, NullPointerException, IllegalArgumentException; + + // //////////////////////////////////////////////////////////// + // // + // H5R: HDF5 1.12 Reference API Functions // + // // + // //////////////////////////////////////////////////////////// + + /** + * @ingroup JH5R + * * H5Rcreate_object creates a reference pointing to the object named name located at loc id. 
* * @param loc_id @@ -10958,7 +12236,7 @@ public class H5 implements java.io.Serializable { * @return the reference (byte[]) if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -10968,6 +12246,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rcreate_region creates the reference, pointing to the region represented by * space id within the object named name located at loc id. * @@ -10983,7 +12263,7 @@ public class H5 implements java.io.Serializable { * @return the reference (byte[]) if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -10994,6 +12274,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rcreate_attr creates the reference, pointing to the attribute named attr name * and attached to the object named name located at loc id. * @@ -11009,7 +12291,7 @@ public class H5 implements java.io.Serializable { * @return the reference (byte[]) if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -11020,13 +12302,15 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rdestroy destroys a reference and releases resources. * * @param ref_ptr * IN: Reference to an object, region or attribute attached to an object. 
* * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -11036,6 +12320,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rget_type retrieves the type of a reference. * * @param ref_ptr @@ -11044,7 +12330,7 @@ public class H5 implements java.io.Serializable { * @return a valid reference type if successful; otherwise returns H5R UNKNOWN. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -11054,6 +12340,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Requal determines whether two references point to the same object, region or attribute. * * @param ref1_ptr @@ -11064,7 +12352,7 @@ public class H5 implements java.io.Serializable { * @return true if equal, else false * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -11074,6 +12362,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rcopy creates a copy of an existing reference. * * @param src_ref_ptr @@ -11082,7 +12372,7 @@ public class H5 implements java.io.Serializable { * @return a valid copy of the reference (byte[]) if successful. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. 
* @exception IllegalArgumentException @@ -11092,6 +12382,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Ropen_object opens that object and returns an identifier. * The object opened with this function should be closed when it is no longer needed * so that resource leaks will not develop. Use the appropriate close function such @@ -11111,7 +12403,7 @@ public class H5 implements java.io.Serializable { * @return a valid identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -11133,6 +12425,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Ropen region creates a copy of the dataspace of the dataset pointed to by a region reference, * ref ptr, and defines a selection matching the selection pointed to by ref ptr within the dataspace * copy. Use H5Sclose to release the dataspace identifier returned by this function when the identifier is @@ -11152,7 +12446,7 @@ public class H5 implements java.io.Serializable { * @return a valid dataspace identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -11174,6 +12468,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Ropen_attr opens the attribute attached to the object and returns an identifier. * The attribute opened with this function should be closed with H5Aclose when it is no longer needed * so that resource leaks will not develop. 
@@ -11192,7 +12488,7 @@ public class H5 implements java.io.Serializable { * @return a valid attribute identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -11213,9 +12509,9 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Ropen_attr(byte[] ref_ptr, long rapl_id, long aapl_id) throws HDF5LibraryException, NullPointerException, IllegalArgumentException; - // Get type // - /** + * @ingroup JH5R + * * H5Rget obj type3 retrieves the type of the referenced object pointed to. * * @param ref_ptr @@ -11228,7 +12524,7 @@ public class H5 implements java.io.Serializable { * @return Returns the object type * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * array is null. * @exception IllegalArgumentException @@ -11238,6 +12534,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rget_file_name retrieves the file name for the object, region or attribute reference pointed to. * * @param ref_ptr @@ -11246,7 +12544,7 @@ public class H5 implements java.io.Serializable { * @return Returns the file name of the reference * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * array is null. * @exception IllegalArgumentException @@ -11256,6 +12554,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rget_obj_name retrieves the object name for the object, region or attribute reference pointed to. 
* * @param ref_ptr @@ -11268,7 +12568,7 @@ public class H5 implements java.io.Serializable { * @return Returns the object name of the reference * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * array is null. * @exception IllegalArgumentException @@ -11278,6 +12578,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5R + * * H5Rget_attr_name retrieves the attribute name for the object, region or attribute reference pointed to. * * @param ref_ptr @@ -11286,7 +12588,7 @@ public class H5 implements java.io.Serializable { * @return Returns the attribute name of the reference * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * array is null. * @exception IllegalArgumentException @@ -11297,218 +12599,26 @@ public class H5 implements java.io.Serializable { // //////////////////////////////////////////////////////////// // // - // H5R: HDF5 1.8 Reference API Functions // + // H5S: Dataspace Interface Functions // // // // //////////////////////////////////////////////////////////// - - private synchronized static native int H5Rcreate(byte[] ref, long loc_id, String name, int ref_type, - long space_id) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException; - - /** - * H5Rcreate creates the reference, ref, of the type specified in ref_type, pointing to the object name - * located at loc_id. - * - * @param loc_id - * IN: Location identifier used to locate the object being pointed to. - * @param name - * IN: Name of object at location loc_id. - * @param ref_type - * IN: Type of reference. - * @param space_id - * IN: Dataspace identifier with selection. - * - * @return the reference (byte[]) if successful - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
- * @exception NullPointerException - * an input array is null. - * @exception IllegalArgumentException - * an input array is invalid. - **/ - public synchronized static byte[] H5Rcreate(long loc_id, String name, int ref_type, long space_id) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException - { - /* These sizes are correct for HDF5.1.2 */ - int ref_size = 8; - if (ref_type == HDF5Constants.H5R_DATASET_REGION) { - ref_size = 12; - } - byte rbuf[] = new byte[ref_size]; - - /* will raise an exception if fails */ - H5Rcreate(rbuf, loc_id, name, ref_type, space_id); - - return rbuf; - } - - /** - * Given a reference to some object, H5Rdereference opens that object and return an identifier. - * - * @param dataset - * IN: Dataset containing reference object. - * @param access_list - * IN: Property list of the object being referenced. - * @param ref_type - * IN: The reference type of ref. - * @param ref - * IN: reference to an object - * - * @return valid identifier if successful - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * output array is null. - * @exception IllegalArgumentException - * output array is invalid. - **/ - public static long H5Rdereference(long dataset, long access_list, int ref_type, byte[] ref) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException - { - long id = _H5Rdereference(dataset, access_list, ref_type, ref); - if (id > 0) { - log.trace("OPEN_IDS: H5Rdereference add {}", id); - OPEN_IDS.add(id); - log.trace("OPEN_IDS: {}", OPEN_IDS.size()); - } - return id; - } - - private synchronized static native long _H5Rdereference(long dataset, long access_list, int ref_type, - byte[] ref) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException; - - /** - * H5Rget_name retrieves a name for the object identified by ref. 
- * - * @param loc_id - * IN: Identifier for the dataset containing the reference or for the group that dataset is in. - * @param ref_type - * IN: Type of reference. - * @param ref - * IN: An object or dataset region reference. - * @param name - * OUT: A name associated with the referenced object or dataset region. - * @param size - * IN: The size of the name buffer. - * - * @return Returns the length of the name if successful, returning 0 (zero) if no name is associated with - * the identifier. Otherwise returns a negative value. - * - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * size is null. - * @exception IllegalArgumentException - * Argument is illegal. - * - **/ - public synchronized static native long H5Rget_name(long loc_id, int ref_type, byte[] ref, String[] name, - long size) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException; - - /** - * H5Rget_name_string retrieves a name for the object identified by ref. - * - * @param loc_id - * IN: Identifier for the dataset containing the reference or for the group that dataset is in. - * @param ref_type - * IN: Type of reference. - * @param ref - * IN: An object or dataset region reference. - * - * @return Returns the name if successful, returning null if no name is associated with - * the identifier. - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * size is null. - * @exception IllegalArgumentException - * Argument is illegal. - **/ - public synchronized static native String H5Rget_name_string(long loc_id, int ref_type, byte[] ref) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException; - /** - * H5Rget_obj_type Given a reference to an object ref, H5Rget_obj_type returns the type of the object - * pointed to. - * - * @param loc_id - * IN: loc_id of the reference object. - * @param ref_type - * IN: Type of reference to query. 
- * @param ref - * IN: the reference - * - * @return Returns the object type + * @defgroup JH5S Java Dataspace (H5S) Interface * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * array is null. - * @exception IllegalArgumentException - * array is invalid. - **/ - public synchronized static native int H5Rget_obj_type(long loc_id, int ref_type, byte ref[]) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException; - - /** - * H5Rget_obj_type2 Retrieves the type of object that an object reference points to. + * @see H5S, C-API * - * @see public static int H5Rget_obj_type(int loc_id, int ref_type, byte ref[]) + * @see @ref H5S_UG, User Guide **/ - private synchronized static native int H5Rget_obj_type2(long loc_id, int ref_type, byte ref[], - int[] obj_type) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** - * Given a reference to an object ref, H5Rget_region creates a copy of the dataspace of the dataset - * pointed to and defines a selection in the copy which is the region pointed to. - * - * @param loc_id - * IN: loc_id of the reference object. - * @param ref_type - * IN: The reference type of ref. - * @param ref - * OUT: the reference to the object and region - * - * @return a valid identifier if successful - * - * @exception HDF5LibraryException - * Error from the HDF-5 Library. - * @exception NullPointerException - * output array is null. - * @exception IllegalArgumentException - * output array is invalid. 
+ * @defgroup JH5S Java Dataspace (H5S) Interface **/ - public static long H5Rget_region(long loc_id, int ref_type, byte[] ref) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException - { - long id = _H5Rget_region(loc_id, ref_type, ref); - if (id > 0) { - log.trace("OPEN_IDS: H5Rget_region add {}", id); - OPEN_IDS.add(id); - log.trace("OPEN_IDS: {}", OPEN_IDS.size()); - } - return id; - } - - private synchronized static native long _H5Rget_region(long loc_id, int ref_type, byte[] ref) - throws HDF5LibraryException, NullPointerException, IllegalArgumentException; - - // //////////////////////////////////////////////////////////// - // // - // H5S: Dataspace Interface Functions // - // // - // //////////////////////////////////////////////////////////// /**************** Operations on dataspaces ********************/ /** + * @ingroup JH5S + * * H5Screate creates a new dataspace of a particular type. * * @param type @@ -11517,7 +12627,7 @@ public class H5 implements java.io.Serializable { * @return a dataspace identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Screate(int type) throws HDF5LibraryException { @@ -11533,6 +12643,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Screate(int type) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Screate_simple creates a new simple data space and opens it for access. * * @param rank @@ -11545,7 +12657,7 @@ public class H5 implements java.io.Serializable { * @return a dataspace identifier * * @exception HDF5Exception - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * dims or maxdims is null. **/ @@ -11565,6 +12677,8 @@ public class H5 implements java.io.Serializable { throws HDF5Exception, NullPointerException; /** + * @ingroup JH5S + * * H5Sset_extent_simple sets or resets the size of an existing dataspace. 
* * @param space_id @@ -11579,13 +12693,15 @@ public class H5 implements java.io.Serializable { * @return a dataspace identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Sset_extent_simple(long space_id, int rank, long[] current_size, long[] maximum_size) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sset_extent_simple sets or resets the size of an existing dataspace. * * @param space_id @@ -11600,7 +12716,7 @@ public class H5 implements java.io.Serializable { * @return a dataspace identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static long H5Sset_extent_simple(long space_id, int rank, byte[] current_size, byte[] maximum_size) @@ -11615,6 +12731,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5S + * * H5Scopy creates a new dataspace which is an exact copy of the dataspace identified by space_id. * * @param space_id @@ -11622,7 +12740,7 @@ public class H5 implements java.io.Serializable { * @return a dataspace identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Scopy(long space_id) throws HDF5LibraryException { @@ -11638,6 +12756,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Scopy(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sclose releases a dataspace. * * @param space_id @@ -11646,7 +12766,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public static int H5Sclose(long space_id) throws HDF5LibraryException { @@ -11662,6 +12782,8 @@ public class H5 implements java.io.Serializable { private synchronized static native int _H5Sclose(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sencode converts a data space description into binary form in a buffer. * * @param obj_id @@ -11670,12 +12792,14 @@ public class H5 implements java.io.Serializable { * @return the buffer for the object to be encoded into. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native byte[] H5Sencode(long obj_id) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sdecode reconstructs the HDF5 data space object and returns a new object handle for it. * * @param buf @@ -11684,7 +12808,7 @@ public class H5 implements java.io.Serializable { * @return a new object handle * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * buf is null. **/ @@ -11692,19 +12816,24 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sget_simple_extent_npoints determines the number of elements in a dataspace. * * @param space_id * ID of the dataspace object to query + * * @return the number of elements in the dataspace if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Sget_simple_extent_npoints(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sget_simple_extent_ndims determines the dimensionality (or rank) of a dataspace. 
* * @param space_id @@ -11713,12 +12842,14 @@ public class H5 implements java.io.Serializable { * @return the number of dimensions in the dataspace if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Sget_simple_extent_ndims(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sget_simple_extent_dims returns the size and maximum sizes of each dimension of a dataspace through * the dims and maxdims parameters. * @@ -11732,7 +12863,7 @@ public class H5 implements java.io.Serializable { * @return the number of dimensions in the dataspace if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * dims or maxdims is null. **/ @@ -11741,6 +12872,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sis_simple determines whether a dataspace is a simple dataspace. * * @param space_id @@ -11749,11 +12882,13 @@ public class H5 implements java.io.Serializable { * @return true if is a simple dataspace * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Sis_simple(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sget_simple_extent_type queries a dataspace to determine the current class of a dataspace. * * @param space_id @@ -11762,12 +12897,14 @@ public class H5 implements java.io.Serializable { * @return a dataspace class name if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Sget_simple_extent_type(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sset_extent_none removes the extent from a dataspace and sets the type to H5S_NONE. * * @param space_id @@ -11776,11 +12913,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Sset_extent_none(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sextent_copy copies the extent from source_space_id to dest_space_id. This action may change the type * of the dataspace. * @@ -11792,12 +12931,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Sextent_copy(long dest_space_id, long source_space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sextent_equal determines whether the dataspace extents of two dataspaces, space1_id and space2_id, * are equal. * @@ -11809,7 +12950,7 @@ public class H5 implements java.io.Serializable { * @return true if successful, else false * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Sextent_equal(long first_space_id, long second_space_id) throws HDF5LibraryException; @@ -11817,6 +12958,8 @@ public class H5 implements java.io.Serializable { /***************** Operations on dataspace selections *****************/ /** + * @ingroup JH5S + * * H5Sget_select_type retrieves the type of selection currently defined for the dataspace space_id. 
* * @param space_id @@ -11825,11 +12968,13 @@ public class H5 implements java.io.Serializable { * @return the dataspace selection type if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Sget_select_type(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sget_select_npoints determines the number of elements in the current selection of a dataspace. * * @param space_id @@ -11838,11 +12983,13 @@ public class H5 implements java.io.Serializable { * @return the number of elements in the selection if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Sget_select_npoints(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sselect_copy copies all the selection information (including offset) from the source * dataspace to the destination dataspace. * @@ -11852,12 +12999,14 @@ public class H5 implements java.io.Serializable { * ID of the source dataspace * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Sselect_copy(long dst_id, long src_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sselect_valid verifies that the selection for the dataspace. * * @param space_id @@ -11866,11 +13015,13 @@ public class H5 implements java.io.Serializable { * @return true if the selection is contained within the extent and FALSE if it is not or is an error. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Sselect_valid(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sselect_adjust moves a selection by subtracting an offset from it. 
* * @param space_id @@ -11879,7 +13030,7 @@ public class H5 implements java.io.Serializable { * Offset to subtract * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * offset is null. **/ @@ -11887,8 +13038,10 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sget_select_bounds retrieves the coordinates of the bounding box containing the current selection and - * places them into user-supplied buffers.

      The start and end buffers must be large enough to hold the + * places them into user-supplied buffers.

      The start and end buffers must be large enough to hold the * dataspace rank number of coordinates. * * @param space_id @@ -11901,7 +13054,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful,with start and end initialized. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * start or end is null. **/ @@ -11909,25 +13062,29 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sselect_shape_same checks to see if the current selection in the dataspaces are the same * dimensionality and shape. * This is primarily used for reading the entire selection in one swoop. * - * @param space1_id + * @param space1_id * ID of 1st Dataspace pointer to compare - * @param space2_id + * @param space2_id * ID of 2nd Dataspace pointer to compare * * @return true if the selection is the same dimensionality and shape; * false otherwise * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Sselect_shape_same(long space1_id, long space2_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sselect_intersect_block checks to see if the current selection in the * dataspace intersects with the block given. * @@ -11942,7 +13099,7 @@ public class H5 implements java.io.Serializable { * FALSE otherwise * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * offset is null. **/ @@ -11951,6 +13108,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Soffset_simple sets the offset of a simple dataspace space_id. 
* * @param space_id @@ -11961,7 +13120,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * offset array is null. **/ @@ -11969,6 +13128,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Soffset_simple sets the offset of a simple dataspace space_id. * * @param space_id @@ -11979,7 +13140,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * offset array is null. **/ @@ -12000,6 +13161,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5S + * * H5Sselect_all selects the entire extent of the dataspace space_id. * * @param space_id @@ -12008,23 +13171,28 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Sselect_all(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sselect_none resets the selection region for the dataspace space_id to include no elements. * * @param space_id * IN: The identifier of the dataspace to be reset. + * * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Sselect_none(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sselect_elements selects array elements to be included in the selection for the space_id dataspace. 
* * @param space_id @@ -12039,13 +13207,15 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ private synchronized static native int H5Sselect_elements(long space_id, int op, int num_elements, byte[] coord) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sselect_elements selects array elements to be included in the selection for the space_id dataspace. * * @param space_id @@ -12062,7 +13232,7 @@ public class H5 implements java.io.Serializable { * @exception HDF5Exception * Error in the data conversion * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * cord array is **/ @@ -12084,6 +13254,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5S + * * H5Sget_select_elem_npoints returns the number of element points in the current dataspace selection. * * @param spaceid @@ -12092,12 +13264,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Sget_select_elem_npoints(long spaceid) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sget_select_elem_pointlist returns an array of of element points in the current dataspace selection. * The point coordinates have the same dimensionality (rank) as the dataspace they are located within, one * coordinate per point. @@ -12114,7 +13288,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * buf is null. 
**/ @@ -12123,6 +13297,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sselect_hyperslab selects a hyperslab region to add to the current selected region for the dataspace * specified by space_id. The start, stride, count, and block arrays must be the same size as the rank of * the dataspace. @@ -12143,7 +13319,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -12166,6 +13342,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5S + * * H5Sselect_hyperslab selects a hyperslab region to add to the current selected region for the dataspace * specified by space_id. The start, stride, count, and block arrays must be the same size as the rank of * the dataspace. @@ -12186,7 +13364,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -12197,6 +13375,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5S + * * H5Scombine_hyperslab combines a hyperslab selection with the current selection for a dataspace, * creating a new dataspace to return the generated selection. * If the current selection is not a hyperslab, it is freed and the hyperslab @@ -12220,7 +13400,7 @@ public class H5 implements java.io.Serializable { * @return a dataspace ID on success / H5I_INVALID_HID on failure * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. * @exception NullPointerException * an input array is null. * @exception IllegalArgumentException @@ -12231,8 +13411,10 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5S + * * H5Smodify_select refine an existing hyperslab selection with an operation, using a second - * hyperslab. The first selection is modified to contain the result of + * hyperslab. The first selection is modified to contain the result of * space1 operated on by space2. * * @param space1_id @@ -12243,12 +13425,14 @@ public class H5 implements java.io.Serializable { * ID of the source dataspace * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Smodify_select(long space1_id, int op, long space2_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Scombine_select combines two existing hyperslab selections with an operation, returning * a new dataspace with the resulting selection. The dataspace extent from * space1 is copied for the dataspace extent of the newly created dataspace. @@ -12263,12 +13447,14 @@ public class H5 implements java.io.Serializable { * @return a dataspace ID on success / H5I_INVALID_HID on failure * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Scombine_select(long space1_id, int op, long space2_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sis_regular_hyperslab retrieves a regular hyperslab selection for the dataspace specified * by space_id. * @@ -12278,12 +13464,14 @@ public class H5 implements java.io.Serializable { * @return a TRUE/FALSE for hyperslab selection if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native boolean H5Sis_regular_hyperslab(long space_id) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sget_regular_hyperslab determines if a hyperslab selection is regular for the dataspace specified * by space_id. The start, stride, count, and block arrays must be the same size as the rank of the * dataspace. @@ -12300,7 +13488,7 @@ public class H5 implements java.io.Serializable { * OUT: Size of block in hyperslab. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * an output array is null. * @exception IllegalArgumentException @@ -12311,6 +13499,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5S + * * H5Sget_select_hyper_nblocks returns the number of hyperslab blocks in the current dataspace selection. * * @param spaceid @@ -12319,12 +13509,14 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Sget_select_hyper_nblocks(long spaceid) throws HDF5LibraryException; /** + * @ingroup JH5S + * * H5Sget_select_hyper_blocklist returns an array of hyperslab blocks. The block coordinates have the same * dimensionality (rank) as the dataspace they are located within. The list of blocks is formatted as * follows: @@ -12350,7 +13542,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * buf is null. 
**/ @@ -12359,23 +13551,25 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5S + * * H5Sselect_project_intersection projects the intersection of the selections of src_space_id and * src_intersect_space_id within the selection of src_space_id as a * selection within the selection of dst_space_id. * * @param src_space_id - * Selection that is mapped to dst_space_id, and intersected with src_intersect_space_id + * Selection that is mapped to dst_space_id, and intersected with src_intersect_space_id * @param dst_space_id - * Selection that is mapped to src_space_id + * Selection that is mapped to src_space_id * @param src_intersect_space_id - * Selection whose intersection with src_space_id is projected to dst_space_id to obtain the - * result + * Selection whose intersection with src_space_id is projected to dst_space_id to obtain the + * result * * @return a dataspace with a selection equal to the intersection of * src_intersect_space_id and src_space_id projected from src_space to dst_space on success * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Sselect_project_intersection(long src_space_id, long dst_space_id, long src_intersect_space_id) @@ -12398,8 +13592,21 @@ public class H5 implements java.io.Serializable { // H5T: Datatype Interface Functions // // // // //////////////////////////////////////////////////////////// + /** + * @defgroup JH5T Java Datatype (H5T) Interface + * + * @see H5T, C-API + * + * @see @ref H5T_UG, User Guide + **/ + + /** + * @defgroup JH5T Java Datatype (H5T) Interface + **/ /** + * @ingroup JH5T + * * H5Tarray_create creates a new array datatype object. * * @param base_id @@ -12412,7 +13619,7 @@ public class H5 implements java.io.Serializable { * @return a valid datatype identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. * @exception NullPointerException * dim is null. **/ @@ -12432,6 +13639,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tclose releases a datatype. * * @param type_id @@ -12440,7 +13649,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static int H5Tclose(long type_id) throws HDF5LibraryException { @@ -12456,6 +13665,8 @@ public class H5 implements java.io.Serializable { private synchronized static native int _H5Tclose(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tcommit saves a transient datatype as an immutable named datatype in a file. * * @param loc_id @@ -12472,7 +13683,7 @@ public class H5 implements java.io.Serializable { * IN: Datatype access property list. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -12481,6 +13692,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tcommit_anon commits a transient datatype (not immutable) to a file, turning it into a named datatype * with the specified creation and property lists. * @@ -12494,12 +13707,14 @@ public class H5 implements java.io.Serializable { * IN: Datatype access property list. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Tcommit_anon(long loc_id, long type_id, long tcpl_id, long tapl_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tcommitted queries a type to determine whether the type specified by the type identifier is a named * type or a transient type. 
* @@ -12509,11 +13724,13 @@ public class H5 implements java.io.Serializable { * @return true the datatype has been committed * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Tcommitted(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tcompiler_conv finds out whether the library's conversion function from type src_id to type dst_id is * a compiler (hard) conversion. * @@ -12523,13 +13740,15 @@ public class H5 implements java.io.Serializable { * IN: Identifier of destination datatype. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Tcompiler_conv(long src_id, long dst_id) throws HDF5LibraryException; /** - ** H5Tconvert converts nelmts elements from the type specified by the src_id identifier to type dst_id. + * @ingroup JH5T + * + * H5Tconvert converts nelmts elements from the type specified by the src_id identifier to type dst_id. * * @param src_id * IN: Identifier of source datatype. @@ -12545,7 +13764,7 @@ public class H5 implements java.io.Serializable { * IN: Dataset transfer property list identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * buf is null. **/ @@ -12556,6 +13775,8 @@ public class H5 implements java.io.Serializable { // int H5Tconvert(int src_id, int dst_id, long nelmts, Pointer buf, Pointer background, int plist_id); /** + * @ingroup JH5T + * * H5Tcopy copies an existing datatype. The returned type is always transient and unlocked. * * @param type_id @@ -12565,7 +13786,7 @@ public class H5 implements java.io.Serializable { * @return a datatype identifier if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public static long H5Tcopy(long type_id) throws HDF5LibraryException { @@ -12581,6 +13802,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Tcopy(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tcreate creates a new dataype of the specified class with the specified number of bytes. * * @param tclass @@ -12591,7 +13814,7 @@ public class H5 implements java.io.Serializable { * @return datatype identifier * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Tcreate(int tclass, long size) throws HDF5LibraryException { @@ -12607,6 +13830,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Tcreate(int type, long size) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tdecode reconstructs the HDF5 data type object and returns a new object handle for it. * * @param buf @@ -12615,7 +13840,7 @@ public class H5 implements java.io.Serializable { * @return a new object handle * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * buf is null. **/ @@ -12634,6 +13859,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tdetect_class determines whether the datatype specified in dtype_id contains any datatypes of the * datatype class specified in dtype_class. * @@ -12645,12 +13872,14 @@ public class H5 implements java.io.Serializable { * @return true if the datatype specified in dtype_id contains any datatypes of the datatype class * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native boolean H5Tdetect_class(long type_id, int cls) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tencode converts a data type description into binary form in a buffer. * * @param obj_id @@ -12664,7 +13893,7 @@ public class H5 implements java.io.Serializable { * @return the size needed for the allocated buffer. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * buf is null. **/ @@ -12672,6 +13901,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; // /** + // * @ingroup JH5T + // * // * H5Tencode converts a data type description into binary form in a buffer. // * // * @param obj_id @@ -12680,12 +13911,14 @@ public class H5 implements java.io.Serializable { // * @return the buffer for the object to be encoded into. // * // * @exception HDF5LibraryException - // * Error from the HDF-5 Library. + // * Error from the HDF5 Library. // **/ // public synchronized static native byte[] H5Tencode(int obj_id) - // throws HDF5LibraryException; + // throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tenum_create creates a new enumeration datatype based on the specified base datatype, parent_id, * which must be an integer type. * @@ -12695,7 +13928,7 @@ public class H5 implements java.io.Serializable { * @return the datatype identifier for the new enumeration datatype * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Tenum_create(long base_id) throws HDF5LibraryException { @@ -12711,6 +13944,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Tenum_create(long base_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tenum_insert inserts a new enumeration datatype member into an enumeration datatype. 
* * @param type @@ -12721,7 +13956,7 @@ public class H5 implements java.io.Serializable { * IN: The value of the member, data of the correct type * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -12729,6 +13964,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tenum_insert inserts a new enumeration datatype member into an enumeration datatype. * * @param type @@ -12741,7 +13978,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -12752,6 +13989,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5T + * * H5Tenum_insert inserts a new enumeration datatype member into an enumeration datatype. * * @param type @@ -12764,7 +14003,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -12779,6 +14018,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tenum_nameof finds the symbol name that corresponds to the specified value of the enumeration * datatype type. * @@ -12792,7 +14033,7 @@ public class H5 implements java.io.Serializable { * @return the symbol name. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * value is null. 
**/ @@ -12802,6 +14043,8 @@ public class H5 implements java.io.Serializable { // int H5Tenum_nameof(int type, Pointer value, Buffer name/* out */, long size); /** + * @ingroup JH5T + * * H5Tenum_nameof finds the symbol name that corresponds to the specified value of the enumeration * datatype type. * @@ -12817,7 +14060,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -12831,6 +14074,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tenum_valueof finds the value that corresponds to the specified name of the enumeration datatype * type. * @@ -12842,12 +14087,14 @@ public class H5 implements java.io.Serializable { * OUT: The value of the member * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Tenum_valueof(long type, String name, byte[] value) throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tenum_valueof finds the value that corresponds to the specified name of the enumeration datatype * type. * @@ -12861,7 +14108,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * name is null. **/ @@ -12875,6 +14122,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tequal determines whether two datatype identifiers refer to the same datatype. 
* * @param type_id1 @@ -12885,12 +14134,14 @@ public class H5 implements java.io.Serializable { * @return true if the datatype identifiers refer to the same datatype, else false. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5Tequal(long type_id1, long type_id2) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tget_array_dims returns the sizes of the dimensions of the specified array datatype object. * * @param type_id @@ -12901,7 +14152,7 @@ public class H5 implements java.io.Serializable { * @return the non-negative number of dimensions of the array type * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * dims is null. **/ @@ -12912,6 +14163,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5T + * * H5Tget_array_dims2 returns the sizes of the dimensions of the specified array datatype object. * * @param type_id @@ -12922,7 +14175,7 @@ public class H5 implements java.io.Serializable { * @return the non-negative number of dimensions of the array type * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * dims is null. **/ @@ -12930,6 +14183,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tget_array_ndims returns the rank, the number of dimensions, of an array datatype object. * * @param type_id @@ -12938,11 +14193,13 @@ public class H5 implements java.io.Serializable { * @return the rank of the array * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Tget_array_ndims(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tget_class returns the datatype class identifier. * * @param type_id @@ -12951,11 +14208,13 @@ public class H5 implements java.io.Serializable { * @return datatype class identifier if successful; otherwise H5T_NO_CLASS(-1). * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Tget_class(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tget_class_name returns the datatype class identifier. * * @param class_id @@ -12996,6 +14255,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5T + * * H5Tget_create_plist returns a property list identifier for the datatype creation property list * associated with the datatype specified by type_id. * @@ -13005,7 +14266,7 @@ public class H5 implements java.io.Serializable { * @return a datatype property list identifier. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Tget_create_plist(long type_id) throws HDF5LibraryException { @@ -13021,6 +14282,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Tget_create_plist(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tget_cset retrieves the character set type of a string datatype. * * @param type_id @@ -13029,11 +14292,13 @@ public class H5 implements java.io.Serializable { * @return a valid character set type if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Tget_cset(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tset_cset the character set to be used. 
* * @param type_id @@ -13044,11 +14309,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Tset_cset(long type_id, int cset) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tget_ebias retrieves the exponent bias of a floating-point type. * * @param type_id @@ -13057,11 +14324,13 @@ public class H5 implements java.io.Serializable { * @return the bias if successful; otherwise 0. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Tget_ebias(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tset_ebias sets the exponent bias of a floating-point type. * * @param type_id @@ -13072,7 +14341,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static int H5Tset_ebias(long type_id, int ebias) throws HDF5LibraryException { @@ -13081,6 +14350,8 @@ public class H5 implements java.io.Serializable { } /** + * @ingroup JH5T + * * H5Tget_ebias retrieves the exponent bias of a floating-point type. * * @param type_id @@ -13089,11 +14360,13 @@ public class H5 implements java.io.Serializable { * @return the bias * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5Tget_ebias_long(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tset_ebias sets the exponent bias of a floating-point type. * * @param type_id @@ -13102,11 +14375,13 @@ public class H5 implements java.io.Serializable { * IN: Exponent bias value. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. 
+ * Error from the HDF5 Library. **/ public synchronized static native void H5Tset_ebias(long type_id, long ebias) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tget_fields retrieves information about the locations of the various bit fields of a floating point * datatype. * @@ -13123,7 +14398,7 @@ public class H5 implements java.io.Serializable { *

    * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * fields is null. * @exception IllegalArgumentException @@ -13133,6 +14408,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException, IllegalArgumentException; /** + * @ingroup JH5T + * * H5Tget_fields retrieves information about the locations of the various bit fields of a floating point * datatype. * @@ -13141,7 +14418,7 @@ public class H5 implements java.io.Serializable { * @param fields * OUT: location of size and bit-position. * - *
    +     * 
          *      fields[0] = spos  OUT: location to return size of in bits.
          *      fields[1] = epos  OUT: location to return exponent bit-position.
          *      fields[2] = esize OUT: location to return size of exponent in bits.
    @@ -13152,7 +14429,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            fields is null.
          * @exception IllegalArgumentException
    @@ -13168,6 +14445,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException, IllegalArgumentException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_fields sets the locations and sizes of the various floating point bit fields.
          *
          * @param type_id
    @@ -13184,12 +14463,14 @@ public class H5 implements java.io.Serializable {
          *            IN: Size of mantissa in bits.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native void H5Tset_fields(long type_id, long spos, long epos, long esize,
                                                              long mpos, long msize) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_fields sets the locations and sizes of the various floating point bit fields.
          *
          * @param type_id
    @@ -13208,7 +14489,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static int H5Tset_fields(long type_id, int spos, int epos, int esize, int mpos, int msize)
             throws HDF5LibraryException
    @@ -13218,6 +14499,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_inpad retrieves the internal padding type for unused bits in floating-point datatypes.
          *
          * @param type_id
    @@ -13226,11 +14509,13 @@ public class H5 implements java.io.Serializable {
          * @return a valid padding type if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_inpad(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * If any internal bits of a floating point type are unused (that is, those significant bits which are not
          * part of the sign, exponent, or mantissa), then H5Tset_inpad will be filled according to the value of
          * the padding value property inpad.
    @@ -13243,11 +14528,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
     +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_inpad(long type_id, int inpad) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_member_class returns the class of datatype of the specified member.
          *
          * @param type_id
    @@ -13258,12 +14545,14 @@ public class H5 implements java.io.Serializable {
          * @return the class of the datatype of the field if successful;
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_member_class(long type_id, int membno)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_member_index retrieves the index of a field of a compound datatype.
          *
          * @param type_id
    @@ -13274,12 +14563,14 @@ public class H5 implements java.io.Serializable {
          * @return if field is defined, the index; else negative.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_member_index(long type_id, String field_name)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_member_name retrieves the name of a field of a compound datatype or an element of an enumeration
          * datatype.
          *
    @@ -13291,14 +14582,16 @@ public class H5 implements java.io.Serializable {
          * @return a valid pointer to the name if successful; otherwise null.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native String H5Tget_member_name(long type_id, int field_idx)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_member_offset returns the byte offset of the specified member of the compound datatype. This is
    -     * the byte offset in the HDF-5 file/library, NOT the offset of any Java object which might be mapped to
    +     * the byte offset in the HDF5 file/library, NOT the offset of any Java object which might be mapped to
          * this data item.
          *
          * @param type_id
    @@ -13311,6 +14604,8 @@ public class H5 implements java.io.Serializable {
         public synchronized static native long H5Tget_member_offset(long type_id, int membno);
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_member_type returns the datatype of the specified member.
          *
          * @param type_id
    @@ -13321,7 +14616,7 @@ public class H5 implements java.io.Serializable {
          * @return the identifier of a copy of the datatype of the field if successful;
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Tget_member_type(long type_id, int field_idx) throws HDF5LibraryException
         {
    @@ -13338,6 +14633,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_member_value returns the value of the enumeration datatype member memb_no.
          *
          * @param type_id
    @@ -13348,7 +14645,7 @@ public class H5 implements java.io.Serializable {
          *            OUT: The value of the member
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            value is null.
          **/
    @@ -13356,6 +14653,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_member_value returns the value of the enumeration datatype member memb_no.
          *
          * @param type_id
    @@ -13368,7 +14667,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            value is null.
          **/
    @@ -13382,6 +14681,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_native_type returns the equivalent native datatype for the datatype specified in type_id.
          *
          * @param type_id
    @@ -13391,7 +14692,7 @@ public class H5 implements java.io.Serializable {
          * @return the native datatype identifier for the specified dataset datatype.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static long H5Tget_native_type(long type_id) throws HDF5LibraryException
         {
    @@ -13399,6 +14700,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_native_type returns the equivalent native datatype for the datatype specified in type_id.
          *
          * @param type_id
    @@ -13409,7 +14712,7 @@ public class H5 implements java.io.Serializable {
          * @return the native datatype identifier for the specified dataset datatype.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Tget_native_type(long type_id, int direction) throws HDF5LibraryException
         {
    @@ -13426,6 +14729,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_nmembers retrieves the number of fields a compound datatype has.
          *
          * @param type_id
    @@ -13434,11 +14739,13 @@ public class H5 implements java.io.Serializable {
          * @return number of members datatype has if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_nmembers(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_norm retrieves the mantissa normalization of a floating-point datatype.
          *
          * @param type_id
    @@ -13447,11 +14754,13 @@ public class H5 implements java.io.Serializable {
          * @return a valid normalization type if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_norm(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_norm sets the mantissa normalization of a floating-point datatype.
          *
          * @param type_id
    @@ -13462,11 +14771,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_norm(long type_id, int norm) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_offset retrieves the bit offset of the first significant bit.
          *
          * @param type_id
    @@ -13475,11 +14786,13 @@ public class H5 implements java.io.Serializable {
          * @return a positive offset value if successful; otherwise 0.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_offset(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_offset sets the bit offset of the first significant bit.
          *
          * @param type_id
    @@ -13490,7 +14803,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static int H5Tset_offset(long type_id, int offset) throws HDF5LibraryException
         {
    @@ -13499,6 +14812,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_offset sets the bit offset of the first significant bit.
          *
          * @param type_id
    @@ -13507,12 +14822,14 @@ public class H5 implements java.io.Serializable {
          *            IN: Offset of first significant bit.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native void H5Tset_offset(long type_id, long offset)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_order returns the byte order of an atomic datatype.
          *
          * @param type_id
    @@ -13521,11 +14838,13 @@ public class H5 implements java.io.Serializable {
          * @return a byte order constant if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_order(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_order sets the byte ordering of an atomic datatype.
          *
          * @param type_id
    @@ -13536,11 +14855,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_order(long type_id, int order) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_pad retrieves the padding type of the least and most-significant bit padding.
          *
          * @param type_id
    @@ -13556,7 +14877,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            pad is null.
          **/
    @@ -13564,6 +14885,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_pad sets the least and most-significant bits padding types.
          *
          * @param type_id
    @@ -13576,12 +14899,14 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_pad(long type_id, int lsb, int msb)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_precision returns the precision of an atomic datatype.
          *
          * @param type_id
    @@ -13590,11 +14915,13 @@ public class H5 implements java.io.Serializable {
          * @return the number of significant bits if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_precision(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_precision sets the precision of an atomic datatype.
          *
          * @param type_id
    @@ -13605,7 +14932,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static int H5Tset_precision(long type_id, int precision) throws HDF5LibraryException
         {
    @@ -13614,6 +14941,8 @@ public class H5 implements java.io.Serializable {
         }
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_precision returns the precision of an atomic datatype.
          *
          * @param type_id
    @@ -13622,11 +14951,13 @@ public class H5 implements java.io.Serializable {
          * @return the number of significant bits if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native long H5Tget_precision_long(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_precision sets the precision of an atomic datatype.
          *
          * @param type_id
    @@ -13635,12 +14966,14 @@ public class H5 implements java.io.Serializable {
          *            IN: Number of bits of precision for datatype.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native void H5Tset_precision(long type_id, long precision)
             throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_sign retrieves the sign type for an integer type.
          *
          * @param type_id
    @@ -13649,11 +14982,13 @@ public class H5 implements java.io.Serializable {
          * @return a valid sign type if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_sign(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_sign sets the sign property for an integer type.
          *
          * @param type_id
    @@ -13664,11 +14999,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_sign(long type_id, int sign) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_size returns the size of a datatype in bytes.
          *
          * @param type_id
    @@ -13677,11 +15014,13 @@ public class H5 implements java.io.Serializable {
          * @return the size of the datatype in bytes
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native long H5Tget_size(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_size sets the total size in bytes, size, for an atomic datatype (this operation is not permitted
          * on compound datatypes).
          *
    @@ -13693,11 +15032,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_size(long type_id, long size) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_strpad retrieves the string padding method for a string datatype.
          *
          * @param type_id
    @@ -13706,11 +15047,13 @@ public class H5 implements java.io.Serializable {
          * @return a valid string padding type if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tget_strpad(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_strpad defines the storage mechanism for the string.
          *
          * @param type_id
    @@ -13721,11 +15064,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_strpad(long type_id, int strpad) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_super returns the type from which TYPE is derived.
          *
          * @param type
    @@ -13734,7 +15079,7 @@ public class H5 implements java.io.Serializable {
          * @return the parent type
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public static long H5Tget_super(long type) throws HDF5LibraryException
         {
    @@ -13750,6 +15095,8 @@ public class H5 implements java.io.Serializable {
         private synchronized static native long _H5Tget_super(long type) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tget_tag returns the tag associated with datatype type_id.
          *
          * @param type
    @@ -13758,11 +15105,13 @@ public class H5 implements java.io.Serializable {
          * @return the tag
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native String H5Tget_tag(long type) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tset_tag tags an opaque datatype type_id with a unique ASCII identifier tag.
          *
          * @param type
    @@ -13773,11 +15122,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tset_tag(long type, String tag) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tinsert adds another member to the compound datatype type_id.
          *
          * @param type_id
    @@ -13792,7 +15143,7 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -13800,6 +15151,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tis_variable_str determines whether the datatype identified in type_id is a variable-length string.
          *
          * @param type_id
    @@ -13808,11 +15161,13 @@ public class H5 implements java.io.Serializable {
          * @return true if type_id is a variable-length string.
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native boolean H5Tis_variable_str(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tlock locks the datatype specified by the type_id identifier, making it read-only and
          * non-destrucible.
          *
    @@ -13822,11 +15177,13 @@ public class H5 implements java.io.Serializable {
          * @return a non-negative value if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          **/
         public synchronized static native int H5Tlock(long type_id) throws HDF5LibraryException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Topen opens a named datatype at the location specified by loc_id and return an identifier for the
          * datatype.
          *
    @@ -13840,7 +15197,7 @@ public class H5 implements java.io.Serializable {
          * @return a named datatype identifier if successful
          *
          * @exception HDF5LibraryException
    -     *            Error from the HDF-5 Library.
    +     *            Error from the HDF5 Library.
          * @exception NullPointerException
          *            name is null.
          **/
    @@ -13860,6 +15217,8 @@ public class H5 implements java.io.Serializable {
             throws HDF5LibraryException, NullPointerException;
     
         /**
    +     * @ingroup JH5T
    +     *
          * H5Tpack recursively removes padding from within a compound datatype to make it more efficient
          * (space-wise) to store that data. 

    WARNING: This call only affects the C-data, even if it * succeeds, there may be no visible effect on Java objects. @@ -13870,11 +15229,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Tpack(long type_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Treclaim reclaims buffer used for VL data. * * @param type_id @@ -13887,7 +15248,7 @@ public class H5 implements java.io.Serializable { * Buffer with data to be reclaimed. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. * @exception NullPointerException * buf is null. **/ @@ -13896,6 +15257,8 @@ public class H5 implements java.io.Serializable { throws HDF5LibraryException, NullPointerException; /** + * @ingroup JH5T + * * H5Tvlen_create creates a new variable-length (VL) dataype. * * @param base_id @@ -13904,7 +15267,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public static long H5Tvlen_create(long base_id) throws HDF5LibraryException { @@ -13920,6 +15283,8 @@ public class H5 implements java.io.Serializable { private synchronized static native long _H5Tvlen_create(long base_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Tflush causes all buffers associated with a committed datatype to be immediately flushed to disk * without removing the data from the cache. * @@ -13927,11 +15292,13 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the committed datatype to be flushed. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native void H5Tflush(long dtype_id) throws HDF5LibraryException; /** + * @ingroup JH5T + * * H5Trefresh causes all buffers associated with a committed datatype to be cleared and immediately * re-loaded with updated contents from disk. This function essentially closes the datatype, evicts * all metadata associated with it from the cache, and then re-opens the datatype. The reopened datatype @@ -13941,7 +15308,7 @@ public class H5 implements java.io.Serializable { * IN: Identifier of the committed datatype to be refreshed. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5Trefresh(long dtype_id) throws HDF5LibraryException; @@ -13962,8 +15329,21 @@ public class H5 implements java.io.Serializable { // // // //////////////////////////////////////////////////////////// - /// VOL Connector Functionality /** + * @defgroup JH5VL Java VOL Connector (H5VL) Interface + * + * @see H5VL, C-API + * + * @see @ref H5VL_UG, User Guide + **/ + + /** + * @defgroup JH5VL Java VOL Connector (H5VL) Interface + **/ + + /** + * @ingroup JH5VL + * * H5VLregister_connector_by_name registers a new VOL connector as a member of the virtual object layer * class. * @@ -13976,11 +15356,13 @@ public class H5 implements java.io.Serializable { * @return a VOL connector ID * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5VLregister_connector_by_name(String connector_name, long vipl_id) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLregister_connector_by_value registers a new VOL connector as a member of the virtual object layer * class. * @@ -13993,11 +15375,13 @@ public class H5 implements java.io.Serializable { * @return a VOL connector ID * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native long H5VLregister_connector_by_value(int connector_value, long vipl_id) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLis_connector_registered_by_name tests whether a VOL class has been registered. * * @param name @@ -14006,11 +15390,13 @@ public class H5 implements java.io.Serializable { * @return true if a VOL connector with that name has been registered * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5VLis_connector_registered_by_name(String name) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLis_connector_registered_by_value tests whether a VOL class has been registered. * * @param connector_value @@ -14019,11 +15405,13 @@ public class H5 implements java.io.Serializable { * @return true if a VOL connector with that value has been registered * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native boolean H5VLis_connector_registered_by_value(int connector_value) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLget_connector_id retrieves the ID for a registered VOL connector for a given object. * * @param object_id @@ -14032,10 +15420,12 @@ public class H5 implements java.io.Serializable { * @return a VOL connector ID * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5VLget_connector_id(long object_id) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLget_connector_id_by_name retrieves the ID for a registered VOL connector. * * @param name @@ -14044,11 +15434,13 @@ public class H5 implements java.io.Serializable { * @return a VOL connector ID * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native long H5VLget_connector_id_by_name(String name) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLget_connector_id_by_value retrieves the ID for a registered VOL connector. * * @param connector_value @@ -14057,11 +15449,13 @@ public class H5 implements java.io.Serializable { * @return a VOL connector ID * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native long H5VLget_connector_id_by_value(int connector_value) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLget_connector_name returns the connector name for the VOL associated with the * object or file ID. * @@ -14071,28 +15465,32 @@ public class H5 implements java.io.Serializable { * @return the connector name * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native String H5VLget_connector_name(long object_id) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLclose closes a VOL connector ID. * * @param connector_id * IN: Identifier of the connector. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native void H5VLclose(long connector_id) throws HDF5LibraryException; /** + * @ingroup JH5VL + * * H5VLunregister_connector removes a VOL connector ID from the library. * * @param connector_id * IN: Identifier of the connector. * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native void H5VLunregister_connector(long connector_id) throws HDF5LibraryException; @@ -14105,8 +15503,17 @@ public class H5 implements java.io.Serializable { // H5Z: Filter Interface Functions // // // // //////////////////////////////////////////////////////////// + /** + * @defgroup JH5Z Java Filter (H5Z) Interface + * + * @see H5Z, C-API + * + * @see @ref H5Z_UG, User Guide + **/ /** + * @ingroup JH5Z + * * H5Zfilter_avail checks if a filter is available. * * @param filter @@ -14115,11 +15522,13 @@ public class H5 implements java.io.Serializable { * @return a non-negative(TRUE/FALSE) value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Zfilter_avail(int filter) throws HDF5LibraryException; /** + * @ingroup JH5Z + * * H5Zget_filter_info gets information about a pipeline data filter. * * @param filter @@ -14128,11 +15537,13 @@ public class H5 implements java.io.Serializable { * @return the filter information flags * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. **/ public synchronized static native int H5Zget_filter_info(int filter) throws HDF5LibraryException; /** + * @ingroup JH5Z + * * H5Zunregister unregisters a filter. * * @param filter @@ -14141,7 +15552,7 @@ public class H5 implements java.io.Serializable { * @return a non-negative value if successful * * @exception HDF5LibraryException - * Error from the HDF-5 Library. + * Error from the HDF5 Library. 
**/ public synchronized static native int H5Zunregister(int filter) throws HDF5LibraryException; diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java index 5995246..7bd3336 100644 --- a/java/src/hdf/hdf5lib/HDF5Constants.java +++ b/java/src/hdf/hdf5lib/HDF5Constants.java @@ -16,14 +16,15 @@ package hdf.hdf5lib; import hdf.hdf5lib.structs.H5O_token_t; /** + * @page HDF5CONST Constants and Enumerated Types * This class contains C constants and enumerated types of HDF5 library. The - * values of these constants are obtained from the library by calling J2C(int - * jconstant), where jconstant is any of the private constants which start their - * name with "JH5" need to be converted. + * values of these constants are obtained from the library by calling + * the JNI function jconstant, where jconstant is used for any of the private constants + * which start their name with "H5" need to be converted. *

    * Do not edit this file! * - * See also: hdf.hdf5lib.HDF5Library + * @see @ref HDF5LIB */ public class HDF5Constants { static { H5.loadH5Lib(); } @@ -32,8 +33,6 @@ public class HDF5Constants { // Get the HDF5 constants from the library // // ///////////////////////////////////////////////////////////////////////// - // public static final long H5_QUARTER_HADDR_MAX = H5_QUARTER_HADDR_MAX(); - /** Special parameters for szip compression */ public static final int H5_SZIP_MAX_PIXELS_PER_BLOCK = H5_SZIP_MAX_PIXELS_PER_BLOCK(); /** Special parameters for szip compression */ @@ -2014,6 +2013,8 @@ public class HDF5Constants { private static native final int H5ES_STATUS_FAIL(); + private static native final int H5ES_STATUS_CANCELED(); + private static native final int H5F_ACC_CREAT(); private static native final int H5F_ACC_EXCL(); diff --git a/java/src/hdf/hdf5lib/HDF5GroupInfo.java b/java/src/hdf/hdf5lib/HDF5GroupInfo.java deleted file mode 100644 index 880f003..0000000 --- a/java/src/hdf/hdf5lib/HDF5GroupInfo.java +++ /dev/null @@ -1,182 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -package hdf.hdf5lib; - -/** - *

    - * This class is a container for the information reported about an HDF5 Object - * from the H5Gget_obj_info() method. - *

    - * The fileno and objno fields contain four values which uniquely identify an - * object among those HDF5 files which are open: if all four values are the same - * between two objects, then the two objects are the same (provided both files - * are still open). The nlink field is the number of hard links to the object or - * zero when information is being returned about a symbolic link (symbolic links - * do not have hard links but all other objects always have at least one). The - * type field contains the type of the object, one of H5G_GROUP, H5G_DATASET, or - * H5G_LINK. The mtime field contains the modification time. If information is - * being returned about a symbolic link then linklen will be the length of the - * link value (the name of the pointed-to object with the null terminator); - * otherwise linklen will be zero. Other fields may be added to this structure - * in the future. - */ - -public class HDF5GroupInfo { - long[] fileno; - long[] objno; - int nlink; - int type; - long mtime; - int linklen; - - /** - * Container for the information reported about an HDF5 Object - * from the H5Gget_obj_info() method - */ - public HDF5GroupInfo() - { - fileno = new long[2]; - objno = new long[2]; - nlink = -1; - type = -1; - mtime = 0; - linklen = 0; - } - - /** - * Sets the HDF5 group information. Used by the JHI5. - * - * @param fn - * File id number - * @param on - * Object id number - * @param nl - * Number of links - * @param t - * Type of the object - * @param mt - * Modification time - * @param len - * Length of link - **/ - public void setGroupInfo(long[] fn, long[] on, int nl, int t, long mt, int len) - { - fileno = fn; - objno = on; - nlink = nl; - type = t; - mtime = mt; - linklen = len; - } - - /** Resets all the group information to defaults. 
*/ - public void reset() - { - fileno[0] = 0; - fileno[1] = 0; - objno[0] = 0; - objno[1] = 0; - nlink = -1; - type = -1; - mtime = 0; - linklen = 0; - } - - /** - * fileno accessors - * @return the file number if successful - */ - public long[] getFileno() { return fileno; } - - /** - * accessors - * @return the object number if successful - */ - public long[] getObjno() { return objno; } - - /** - * accessors - * @return type of group if successful - */ - public int getType() { return type; } - - /** - * accessors - * @return the number of links in the group if successful - */ - public int getNlink() { return nlink; } - - /** - * accessors - * @return the modified time value if successful - */ - public long getMtime() { return mtime; } - - /** - * accessors - * @return a length of link name if successful - */ - public int getLinklen() { return linklen; } - - /** - * The fileno and objno fields contain four values which uniquely identify - * an object among those HDF5 files. - */ - @Override - public boolean equals(Object obj) - { - if (!(obj instanceof HDF5GroupInfo)) { - return false; - } - - HDF5GroupInfo target = (HDF5GroupInfo)obj; - if ((fileno[0] == target.fileno[0]) && (fileno[1] == target.fileno[1]) && - (objno[0] == target.objno[0]) && (objno[1] == target.objno[1])) { - return true; - } - else { - return false; - } - } - - /** - * Returns the object id. - * - * @return the object id - */ - public long getOID() { return objno[0]; } - - /** - * Converts this object to a String representation. 
- * - * @return a string representation of this object - */ - @Override - public String toString() - { - String fileStr = "fileno=null"; - String objStr = "objno=null"; - - if (fileno != null) { - fileStr = "fileno[0]=" + fileno[0] + ",fileno[1]=" + fileno[1]; - } - - if (objno != null) { - objStr = "objno[0]=" + objno[0] + ",objno[1]=" + objno[1]; - } - - return getClass().getName() + "[" + fileStr + "," + objStr + ",type=" + type + ",nlink=" + nlink + - ",mtime=" + mtime + ",linklen=" + linklen + "]"; - } -} diff --git a/java/src/hdf/hdf5lib/HDFArray.java b/java/src/hdf/hdf5lib/HDFArray.java index 8525fb0..9ea314d 100644 --- a/java/src/hdf/hdf5lib/HDFArray.java +++ b/java/src/hdf/hdf5lib/HDFArray.java @@ -19,14 +19,15 @@ import hdf.hdf5lib.exceptions.HDF5Exception; import hdf.hdf5lib.exceptions.HDF5JavaException; /** + * @page HDFARRAY Java Array Conversion * This is a class for handling multidimensional arrays for HDF. *

    * The purpose is to allow the storage and retrieval of arbitrary array types containing scientific data. *

    * The methods support the conversion of an array to and from Java to a one-dimensional array of bytes - * suitable for I/O by the C library.

    This class heavily uses the HDFNativeData class to convert between Java and C - * representations. + * suitable for I/O by the C library.

    This class heavily uses the + * @ref HDFNATIVE + * class to convert between Java and C representations. */ public class HDFArray { diff --git a/java/src/hdf/hdf5lib/HDFNativeData.java b/java/src/hdf/hdf5lib/HDFNativeData.java index c497043..bc4e866 100644 --- a/java/src/hdf/hdf5lib/HDFNativeData.java +++ b/java/src/hdf/hdf5lib/HDFNativeData.java @@ -17,11 +17,12 @@ import hdf.hdf5lib.exceptions.HDF5Exception; import hdf.hdf5lib.exceptions.HDF5JavaException; /** + * @page HDFNATIVE Native Arrays of Numbers * This class encapsulates native methods to deal with arrays of numbers, * converting from numbers to bytes and bytes to numbers. *

    - * These routines are used by class HDFArray to pass data to and from the - * HDF-5 library. + * These routines are used by class @ref HDFARRAY to pass data to and from the + * HDF5 library. *

    * Methods xxxToByte() convert a Java array of primitive numbers (int, short, * ...) to a Java array of bytes. Methods byteToXxx() convert from a Java array @@ -30,7 +31,7 @@ import hdf.hdf5lib.exceptions.HDF5JavaException; * Variant interfaces convert a section of an array, and also can convert to * sub-classes of Java Number. *

    - * See also: hdf.hdf5lib.HDFArray. + * @see @ref HDFARRAY. */ public class HDFNativeData { diff --git a/java/src/hdf/hdf5lib/callbacks/Callbacks.java b/java/src/hdf/hdf5lib/callbacks/Callbacks.java index 86d6193..3d5fbd1 100644 --- a/java/src/hdf/hdf5lib/callbacks/Callbacks.java +++ b/java/src/hdf/hdf5lib/callbacks/Callbacks.java @@ -13,6 +13,7 @@ package hdf.hdf5lib.callbacks; /** + * @page CALLBACKS HDF5 Java Callbacks Interface * All callback definitions must derive from this interface. Any * derived interfaces must define a single public method named "callback". * You are responsible for deregistering your callback (if necessary) @@ -20,11 +21,14 @@ package hdf.hdf5lib.callbacks; * a callback which has been GC'd, you will likely crash the VM. If * there is no method to deregister the callback (e.g. atexit * in the C library), you must ensure that you always keep a live reference - * to the callback object.

    + * to the callback object. + * * A callback should generally never throw an exception, since it doesn't * necessarily have an encompassing Java environment to catch it. Any * exceptions thrown will be passed to the default callback exception * handler. + * + * @defgroup JCALL HDF5 Library Java Callbacks */ public interface Callbacks { } diff --git a/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java index 6c68f36..9958b3b 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java @@ -20,6 +20,8 @@ import hdf.hdf5lib.structs.H5A_info_t; */ public interface H5A_iterate_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each attribute * * @param loc_id the ID for the group or dataset being iterated over diff --git a/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java b/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java index cf7ada6..49323a2 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5D_append_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each dataset access property list * * @param dataset_id the ID for the dataset being iterated over diff --git a/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java index 54c12e3..5f77998 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5D_iterate_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each dataset element * * @param elem the pointer to the element in memory containing the current point diff --git a/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java 
b/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java index 5722195..a8ef5df 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java @@ -20,6 +20,8 @@ import hdf.hdf5lib.structs.H5E_error2_t; */ public interface H5E_walk_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each error stack element * * @param nidx the index of the current error stack element diff --git a/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java b/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java index 53635bf..7342e58 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java +++ b/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java @@ -20,6 +20,8 @@ import hdf.hdf5lib.structs.H5L_info_t; */ public interface H5L_iterate_t extends Callbacks { /** + * @ingroup JCALL + * * application callback for each group * * @param loc_id the ID for the group being iterated over diff --git a/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java b/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java index ecf868c..bfe8c67 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java +++ b/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java @@ -20,6 +20,8 @@ import hdf.hdf5lib.structs.H5O_info_t; */ public interface H5O_iterate_t extends Callbacks { /** + * @ingroup JCALL + * * application callback for each group * * @param loc_id the ID for the group or dataset being iterated over diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java index 0a09a94..a235861 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_cls_close_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param prop_id the ID for the property list class being iterated over diff --git 
a/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java index 53f86be..b218e0c 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_cls_copy_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param new_prop_id the ID for the property list copy diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java index 8f4e782..3d407d0 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_cls_create_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param prop_id the ID for the property list class being iterated over diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java index db98a67..51a5768 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_iterate_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param plist the ID for the property list being iterated over diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java index 1aa7ce4..2ddc980 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_prp_close_func_cb extends Callbacks { /** + * @ingroup JCALL + * * 
application callback for each property list * * @param name the name of the property being closed diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java index 49cef7d..53caa94 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_prp_compare_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param value1 the value of the first property being compared diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java index f4924ee..0b2349e 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_prp_copy_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param name the name of the property being copied diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java index bce024b..6065ce0 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_prp_create_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param name the name of the property list being created diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java index 8c5dccc..4384ca7 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java 
@@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_prp_delete_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param prop_id the ID of the property list the property is deleted from diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java index 0f3457f..999c7b0 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_prp_get_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param prop_id the ID for the property list being queried diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java index a55ca3a..893344b 100644 --- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java +++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java @@ -18,6 +18,8 @@ package hdf.hdf5lib.callbacks; */ public interface H5P_prp_set_func_cb extends Callbacks { /** + * @ingroup JCALL + * * application callback for each property list * * @param prop_id the ID for the property list being modified diff --git a/java/src/hdf/hdf5lib/callbacks/package-info.java b/java/src/hdf/hdf5lib/callbacks/package-info.java index 114045c..5ef3fab 100644 --- a/java/src/hdf/hdf5lib/callbacks/package-info.java +++ b/java/src/hdf/hdf5lib/callbacks/package-info.java @@ -12,6 +12,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /** + * @page CALLBACKS_UG HDF5 Java Callbacks Interface * All callback definitions must derive from the Callbacks interface. Any * derived interfaces must define a single public method named "callback". 
* You are responsible for deregistering your callback (if necessary) diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java b/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java index 4cb7b1d..f8b526e 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java @@ -16,16 +16,20 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_ATTR + * This sub-class represents HDF5 major error code H5E_ATTR */ public class HDF5AttributeException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5AttributeException with no specified * detail message. */ public HDF5AttributeException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5AttributeException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java b/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java index 9f70456..71b8e47 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java @@ -16,16 +16,20 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_BTREE + * This sub-class represents HDF5 major error code H5E_BTREE */ public class HDF5BtreeException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5BtreeException with no specified detail * message. */ public HDF5BtreeException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5BtreeException with the specified detail * message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java index b4397b7..a837708 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java @@ -16,16 +16,20 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_PLINE + * This sub-class represents HDF5 major error code H5E_PLINE */ public class HDF5DataFiltersException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5DataFiltersException with no specified * detail message. */ public HDF5DataFiltersException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5DataFiltersException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java index f6993a8..d9f49da 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_STORAGE + * This sub-class represents HDF5 major error code H5E_STORAGE */ public class HDF5DataStorageException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5DataStorageExceptionn with no specified * detail message. */ public HDF5DataStorageException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5DataStorageException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java index 8fd4ae9..fea1346 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java @@ -16,16 +16,20 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_DATASET + * This sub-class represents HDF5 major error code H5E_DATASET */ public class HDF5DatasetInterfaceException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5DatasetInterfaceException with no * specified detail message. */ public HDF5DatasetInterfaceException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5DatasetInterfaceException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java index d0d2a09..e2d29d0 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_DATASPACE + * This sub-class represents HDF5 major error code H5E_DATASPACE */ public class HDF5DataspaceInterfaceException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5DataspaceInterfaceException with no * specified detail message. */ public HDF5DataspaceInterfaceException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5DataspaceInterfaceException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java index 2ab4ff9..d7e678b 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_DATATYPE + * This sub-class represents HDF5 major error code H5E_DATATYPE */ public class HDF5DatatypeInterfaceException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5DatatypeInterfaceException with no * specified detail message. */ public HDF5DatatypeInterfaceException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5DatatypeInterfaceException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java b/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java index b098a12..ad42644 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java @@ -14,21 +14,23 @@ package hdf.hdf5lib.exceptions; /** - *

    + * @page ERRORS Errors and Exceptions * The class HDF5Exception returns errors from the Java HDF5 Interface. - *

    + * * Two sub-classes of HDF5Exception are defined: *

      *
    1. - * HDF5LibraryException -- errors raised the HDF5 library code + * HDF5LibraryException -- errors raised by the HDF5 library code *
    2. - * HDF5JavaException -- errors raised the HDF5 Java wrapper code + * HDF5JavaException -- errors raised by the HDF5 Java wrapper code *
    - *

    + * * These exceptions are sub-classed to represent specific error conditions, as * needed. In particular, HDF5LibraryException has a sub-class for each major * error code returned by the HDF5 library. * + * @defgroup JERR HDF5 Library Exception Interface + * */ public class HDF5Exception extends RuntimeException { /** @@ -37,12 +39,16 @@ public class HDF5Exception extends RuntimeException { protected String detailMessage; /** + * @ingroup JERR + * * Constructs an HDF5Exception with no specified detail * message. */ public HDF5Exception() { super(); } /** + * @ingroup JERR + * * Constructs an HDF5Exception with the specified detail * message. * @@ -56,6 +62,8 @@ public class HDF5Exception extends RuntimeException { } /** + * @ingroup JERR + * * Returns the detail message of this exception * * @return the detail message or null if this object does not diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java index c8df3d0..f9f49a1 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_EFL + * This sub-class represents HDF5 major error code H5E_EFL */ public class HDF5ExternalFileListException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5ExternalFileListException with no * specified detail message. */ public HDF5ExternalFileListException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5ExternalFileListException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java index afd6d69..3ebe63a 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_FILE + * This sub-class represents HDF5 major error code H5E_FILE */ public class HDF5FileInterfaceException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5FileInterfaceException with no specified * detail message. */ public HDF5FileInterfaceException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5FileInterfaceException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java index 58e2980..3dc0c72 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_ARGS + * This sub-class represents HDF5 major error code H5E_ARGS */ public class HDF5FunctionArgumentException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5FunctionArgumentException with no * specified detail message. */ public HDF5FunctionArgumentException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5FunctionArgumentException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java index db46aae..aa9289c 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_FUNC + * This sub-class represents HDF5 major error code H5E_FUNC */ public class HDF5FunctionEntryExitException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5FunctionEntryExitException with no * specified detail message. */ public HDF5FunctionEntryExitException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5FunctionEntryExitException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java b/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java index 7f1691d..ba1b5ad 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_HEAP + * This sub-class represents HDF5 major error code H5E_HEAP */ public class HDF5HeapException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5HeapException with no specified detail * message. */ public HDF5HeapException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5HeapException with the specified detail * message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5IdException.java b/java/src/hdf/hdf5lib/exceptions/HDF5IdException.java index 5ce1f01..9dd2d8a 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5IdException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5IdException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_ID + * This sub-class represents HDF5 major error code H5E_ID */ public class HDF5IdException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5IdException with no specified detail * message. */ public HDF5IdException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5IdException with the specified detail * message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java b/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java index 4489486..31efe56 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_INTERNAL + * This sub-class represents HDF5 major error code H5E_INTERNAL */ public class HDF5InternalErrorException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5InternalErrorException with no specified * detail message. */ public HDF5InternalErrorException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5InternalErrorException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java b/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java index ae1cf85..9b38b87 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java @@ -14,21 +14,27 @@ package hdf.hdf5lib.exceptions; /** - *

    + * @page ERRORSJAVA Java Wrapper Errors and Exceptions * The class HDF5JavaException returns errors from the Java wrapper of theHDF5 * library. *

    * These errors include Java configuration errors, security violations, and * resource exhaustion. + * + * @defgroup JERRJAVA HDF5 Library Java Exception Interface */ public class HDF5JavaException extends HDF5Exception { /** + * @ingroup JERRJAVA + * * Constructs an HDF5JavaException with no specified detail * message. */ public HDF5JavaException() { super(); } /** + * @ingroup JERRJAVA + * * Constructs an HDF5JavaException with the specified detail * message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java b/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java index 7c9773f..42472b53 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java @@ -17,14 +17,16 @@ import hdf.hdf5lib.H5; import hdf.hdf5lib.HDF5Constants; /** - *

    + * @page ERRORSLIB HDF5 Library Errors and Exceptions * The class HDF5LibraryException returns errors raised by the HDF5 library. - *

    - * Each major error code from the HDF-5 Library is represented by a sub-class of + * + * Each major error code from the HDF5 Library is represented by a sub-class of * this class, and by default the 'detailedMessage' is set according to the - * minor error code from the HDF-5 Library. + * minor error code from the HDF5 Library. *

    - * For major and minor error codes, see H5Epublic.h in the HDF-5 library. + * For major and minor error codes, @see @ref H5E in the HDF5 library. + * + * @defgroup JERRLIB HDF5 Library JNI Exception Interface * */ @@ -36,6 +38,8 @@ public class HDF5LibraryException extends HDF5Exception { private final long minorErrorNumber; /** + * @ingroup JERRLIB + * * Constructs an HDF5LibraryException with no specified detail * message. */ @@ -43,7 +47,7 @@ public class HDF5LibraryException extends HDF5Exception { { super(); - // this code forces the loading of the HDF-5 library + // this code forces the loading of the HDF5 library // to assure that the native methods are available try { H5.H5open(); @@ -57,6 +61,8 @@ public class HDF5LibraryException extends HDF5Exception { } /** + * @ingroup JERRLIB + * * Constructs an HDF5LibraryException with the specified detail * message. * @@ -66,7 +72,7 @@ public class HDF5LibraryException extends HDF5Exception { public HDF5LibraryException(String s) { super(s); - // this code forces the loading of the HDF-5 library + // this code forces the loading of the HDF5 library // to assure that the native methods are available try { H5.H5open(); @@ -78,6 +84,8 @@ public class HDF5LibraryException extends HDF5Exception { } /** + * @ingroup JERRLIB + * * Get the major error number of the first error on the HDF5 library error * stack. * @@ -87,6 +95,8 @@ public class HDF5LibraryException extends HDF5Exception { private native long _getMajorErrorNumber(); /** + * @ingroup JERRLIB + * * Get the minor error number of the first error on the HDF5 library error * stack. * @@ -96,9 +106,11 @@ public class HDF5LibraryException extends HDF5Exception { private native long _getMinorErrorNumber(); /** - * Return a error message for the minor error number. - *

    - * These messages come from H5Epublic.h. + * @ingroup JERRLIB + * + * Return an error message for the minor error number. + * + * These messages come from @ref H5E. * * @param err_code * the error code @@ -349,20 +361,24 @@ public class HDF5LibraryException extends HDF5Exception { } /** - * Prints this HDF5LibraryException, the HDF-5 Library error - * stack, and the Java stack trace to the standard error stream. + * @ingroup JERRLIB + * + * Prints this HDF5LibraryException, the HDF5 Library error + * stack, and and the Java stack trace to the standard error stream. */ @Override public void printStackTrace() { System.err.println(this); - printStackTrace0(null); // the HDF-5 Library error stack + printStackTrace0(null); // the HDF5 Library error stack super.printStackTrace(); // the Java stack trace } /** - * Prints this HDF5LibraryException the HDF-5 Library error - * stack, and the Java stack trace to the specified print stream. + * @ingroup JERRLIB + * + * Prints this HDF5LibraryException the HDF5 Library error + * stack, and and the Java stack trace to the specified print stream. * * @param f * the file print stream. @@ -382,14 +398,14 @@ public class HDF5LibraryException extends HDF5Exception { catch (Exception ex) { System.err.println(this); }; - // the HDF-5 Library error stack + // the HDF5 Library error stack printStackTrace0(f.getPath()); super.printStackTrace(); // the Java stack trace } } /* - * This private method calls the HDF-5 library to extract the error codes + * This private method calls the HDF5 library to extract the error codes * and error stack. 
*/ private native void printStackTrace0(String s); diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java b/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java index fef5721..719748e 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_IO + * This sub-class represents HDF5 major error code H5E_IO */ public class HDF5LowLevelIOException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5LowLevelIOException with no specified * detail message. */ public HDF5LowLevelIOException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5LowLevelIOException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java b/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java index 4f00006..298d8b8 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_CACHE + * This sub-class represents HDF5 major error code H5E_CACHE */ public class HDF5MetaDataCacheException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5MetaDataCacheException with no specified * detail message. */ public HDF5MetaDataCacheException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5MetaDataCacheException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java index 9675354..b6e94be 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_OHDR + * This sub-class represents HDF5 major error code H5E_OHDR */ public class HDF5ObjectHeaderException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5ObjectHeaderException with no specified * detail message. */ public HDF5ObjectHeaderException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5ObjectHeaderException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java index 66f0bd1..68d581f 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_PLIST + * This sub-class represents HDF5 major error code H5E_PLIST */ public class HDF5PropertyListInterfaceException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5PropertyListInterfaceException with no * specified detail message. */ public HDF5PropertyListInterfaceException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5PropertyListInterfaceException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java index 4feaba7..4c96136 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_REFERENCE + * This sub-class represents HDF5 major error code H5E_REFERENCE */ public class HDF5ReferenceException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5ReferenceException with no specified * detail message. */ public HDF5ReferenceException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5ReferenceException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java index 1a007e7..f920c53 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_RESOURCE + * This sub-class represents HDF5 major error code H5E_RESOURCE */ public class HDF5ResourceUnavailableException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5ResourceUnavailableException with no * specified detail message. */ public HDF5ResourceUnavailableException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5FunctionArgumentException with the * specified detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java b/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java index 4fb8c2e..5d3aa90 100644 --- a/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java +++ b/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java @@ -16,17 +16,21 @@ package hdf.hdf5lib.exceptions; /** * The class HDF5LibraryException returns errors raised by the HDF5 library. *

    - * This sub-class represents HDF-5 major error code H5E_SYM + * This sub-class represents HDF5 major error code H5E_SYM */ public class HDF5SymbolTableException extends HDF5LibraryException { /** + * @ingroup JERRLIB + * * Constructs an HDF5SymbolTableException with no specified * detail message. */ public HDF5SymbolTableException() { super(); } /** + * @ingroup JERRLIB + * * Constructs an HDF5SymbolTableException with the specified * detail message. * diff --git a/java/src/hdf/hdf5lib/exceptions/package-info.java b/java/src/hdf/hdf5lib/exceptions/package-info.java index 8640ccb..2ac7806 100644 --- a/java/src/hdf/hdf5lib/exceptions/package-info.java +++ b/java/src/hdf/hdf5lib/exceptions/package-info.java @@ -12,6 +12,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /** + * @page ERRORS_UG Errors and Exceptions *

    * The package exceptions contains error classes for the Java HDF5 Interface. *

    diff --git a/java/src/hdf/hdf5lib/package-info.java b/java/src/hdf/hdf5lib/package-info.java index c04b862..7ae4df9 100644 --- a/java/src/hdf/hdf5lib/package-info.java +++ b/java/src/hdf/hdf5lib/package-info.java @@ -12,10 +12,11 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /** + @page HDF5LIB_UG HDF5 Java Package * This package is the Java interface for the HDF5 library. *

    - * This code is the called by Java programs to access the entry points of the HDF5 library. Each routine wraps - a single + * This code is the called by Java programs to access the entry points of the HDF5 library. + * Each routine wraps a single * HDF5 entry point, generally with the arguments and return codes analogous to the C interface. *

    * For details of the HDF5 library, see the HDF5 Documentation at: @@ -25,14 +26,13 @@ * Mapping of arguments for Java * *

    - * In general, arguments to the HDF Java API are straightforward translations from the 'C' API described in - the HDF - * Reference Manual. + * In general, arguments to the HDF Java API are straightforward translations from the 'C' API described + * in the HDF Reference Manual. * * - * + * * - * + * * * * @@ -60,28 +60,27 @@ * * * - * * * *
    HDF-5 C types to Java types HDF5 C types to Java types
    HDF-5HDF5Java
    java.lang.String
    void *
    + *
    void *
    * (i.e., pointer to `Any')
    Special -- see HDFArray
    * General Rules for Passing Arguments and Results *

    - * In general, arguments passed IN to Java are the analogous basic types, as above. The exception is - for arrays, - * which are discussed below. + * In general, arguments passed IN to Java are the analogous basic types, as above. The exception + * is for arrays, which are discussed below. *

    * The return value of Java methods is also the analogous type, as above. A major exception to that - rule is that + * rule is that * all HDF functions that return SUCCEED/FAIL are declared boolean in the Java version, rather than - int as + * int as * in the C. Functions that return a value or else FAIL are declared the equivalent to the C function. - However, in most + * However, in most * cases the Java method will raise an exception instead of returning an error code. - * See Errors and Exceptions below. + * @see @ref ERRORS. *

    * Java does not support pass by reference of arguments, so arguments that are returned through OUT - parameters + * parameters * must be wrapped in an object or array. The Java API for HDF consistently wraps arguments in arrays. *

    * For instance, a function that returns two integers is declared: @@ -104,12 +103,12 @@ * *

    * All the routines where this convention is used will have specific documentation of the details, given - below. + * below. *

    * Arrays *

    * HDF5 needs to read and write multi-dimensional arrays of any number type (and records). The HDF5 API - describes the + * describes the * layout of the source and destination, and the data for the array passed as a block of bytes, for instance, * *

    @@ -118,52 +117,52 @@
      *
      * 

    * where ``void *'' means that the data may be any valid numeric type, and is a contiguous block of bytes that - is the + * is the * data for a multi-dimensional array. The other parameters describe the dimensions, rank, and datatype of the - array on + * array on * disk (source) and in memory (destination). *

    * For Java, this ``ANY'' is a problem, as the type of data must always be declared. Furthermore, - multidimensional + * multidimensional * arrays are definitely not laid out contiguously in memory. It would be infeasible to declare a - separate + * separate * routine for every combination of number type and dimensionality. For that reason, the - * HDFArray class is used to discover the type, shape, and - size of the + * HDFArray class is used to discover the type, shape, and + * size of the * data array at run time, and to convert to and from a contiguous array of bytes in synchronized static - native C order. + * native C order. *

    * The upshot is that any Java array of numbers (either primitive or sub-classes of type Number) can be - passed as + * passed as * an ``Object'', and the Java API will translate to and from the appropriate packed array of bytes needed by - the C + * the C * library. So the function above would be declared: * *

      * public synchronized static native int H5Dread(long fid, long filetype, long memtype, long memspace, Object
    - data);
    + * data);
      * 
    * OPEN_IDS.addElement(id); * and the parameter data can be any multi-dimensional array of numbers, such as float[][], or - int[][][], or + * int[][][], or * Double[][]. *

    - * HDF-5 Constants + * HDF5 Constants *

    - * The HDF-5 API defines a set of constants and enumerated values. Most of these values are available to Java - programs - * via the class HDF5Constants. For example, the - parameters for + * The HDF5 API defines a set of constants and enumerated values. Most of these values are available to Java + * programs + * via the class HDF5Constants. For example, the + * parameters for * the h5open() call include two numeric values, HDFConstants.H5F_ACC_RDWR and * HDF5Constants.H5P_DEFAULT. As would be expected, these numbers correspond to the C constants * H5F_ACC_RDWR and H5P_DEFAULT. *

    - * The HDF-5 API defines a set of values that describe number types and sizes, such as "H5T_NATIVE_INT" and - "hsize_t". - * These values are determined at run time by the HDF-5 C library. To support these parameters, the Java class - * HDF5CDataTypes looks up the values when initiated. - The values + * The HDF5 API defines a set of values that describe number types and sizes, such as "H5T_NATIVE_INT" and + * "hsize_t". + * These values are determined at run time by the HDF5 C library. To support these parameters, the Java class + * HDF5CDataTypes looks up the values when initiated. + * The values * can be accessed as public variables of the Java class, such as: * *

    @@ -175,31 +174,30 @@
      * 

    * Error handling and Exceptions *

    - * The HDF5 error API (H5E) manages the behavior of the error stack in the HDF-5 library. This API is - available from the + * The HDF5 error API (H5E) manages the behavior of the error stack in the HDF5 library. This API is + * available from the * JHI5. Errors are converted into Java exceptions. This is totally different from the C interface, but is - very natural + * very natural * for Java programming. *

    * The exceptions of the JHI5 are organized as sub-classes of the class - * HDF5Exception. There are two subclasses - of - * HDF5Exception, - HDF5LibraryException - * and HDF5JavaException. The - sub-classes of the - * former represent errors from the HDF-5 C library, while sub-classes of the latter represent errors in the - JHI5 - * wrapper and support code. - *

    - * The super-class HDF5LibraryException implements the method 'printStackTrace()', - which - * prints out the HDF-5 error stack, as described in the HDF-5 C API H5Eprint(). This may be - used by Java - * exception handlers to print out the HDF-5 error stack. + * HDF5Exception. There are two subclasses + * of + * HDF5Exception, @ref ERRORSLIB HDF5LibraryException + * and @ref ERRORSJAVA HDF5JavaException. + * The sub-classes of the former represent errors from the HDF5 C library, + * while sub-classes of the latter represent errors in the JHI5 wrapper and support code. + *

    + * The super-class HDF5LibraryException implements the method + * 'printStackTrace()', which prints out the HDF5 error stack, as described + * in the HDF5 C API @ref H5Eprint(). This may be + * used by Java + * exception handlers to print out the HDF5 error stack. *


    * - * See also: http://hdfgroup.org/HDF5" + * @ref HDF5LIB + * + * @see: HDF5" * */ package hdf.hdf5lib; diff --git a/java/src/hdf/overview.html b/java/src/hdf/overview.html index f6a34fc..8a9d38f 100644 --- a/java/src/hdf/overview.html +++ b/java/src/hdf/overview.html @@ -4,10 +4,10 @@

    What it is

    The Java HD5 Interface (JHI5) is a Java package -(hdf.hdf5lib) +(hdf.hdf5lib) that ``wraps around'' the HDF5 library.

    There are a large number of functions in the HDF5 -library (version 1.13). Some of the functions are not supported in JHI5. Most +library (version 1.14). Some of the functions are not supported in JHI5. Most of the unsupported functions have C function pointers, which is not currently implemented in JHI5.

    @@ -32,7 +32,7 @@ library contains C functions which implement the native methods. The C functions call the standard HDF5 library, which is linked as part of the same library on most platforms.

    The central part of the JHI5 is the Java class -hdf.hdf5lib.H5. +hdf.hdf5lib.H5. The H5 class calls the standard (i.e., `native' code) HDF5 library, with native methods for most of the HDF5 functions. @@ -42,11 +42,11 @@ The JHI5 is used by Java classes to call the HDF5 library, in order to create HDF5 files, and read and write data in existing HDF5 files.

    For example, the HDF5 library has the function H5Fopen to open an HDF5 file. The Java interface is the class -hdf.hdf5lib.H5, +hdf.hdf5lib.H5, which has a method:

    static native int H5Fopen(String filename, int flags, int access );
    The native method is implemented in C using the -Java +Java Native Method Interface (JNI). This is written something like the following:
    JNIEXPORT jlong
     JNICALL Java_hdf_hdf5lib_H5_H5Fopen
    diff --git a/java/src/jni/exceptionImp.c b/java/src/jni/exceptionImp.c
    index 0f135cd..01b4ee2 100644
    --- a/java/src/jni/exceptionImp.c
    +++ b/java/src/jni/exceptionImp.c
    @@ -153,7 +153,7 @@ Java_hdf_hdf5lib_H5_H5error_1on(JNIEnv *env, jclass clss)
      * Method:    printStackTrace0
      * Signature: (Ljava/lang/Object;)V
      *
    - *  Call the HDF-5 library to print the HDF-5 error stack to 'file_name'.
    + *  Call the HDF5 library to print the HDF5 error stack to 'file_name'.
      */
     JNIEXPORT void JNICALL
     Java_hdf_hdf5lib_exceptions_HDF5LibraryException_printStackTrace0(JNIEnv *env, jobject obj, jstring file_name)
    @@ -187,7 +187,7 @@ done:
      * Method:    _getMajorErrorNumber
      * Signature: ()J
      *
    - *  Extract the HDF-5 major error number from the HDF-5 error stack.
    + *  Extract the HDF5 major error number from the HDF5 error stack.
      */
     JNIEXPORT jlong JNICALL
     Java_hdf_hdf5lib_exceptions_HDF5LibraryException__1getMajorErrorNumber(JNIEnv *env, jobject obj)
    @@ -211,7 +211,7 @@ Java_hdf_hdf5lib_exceptions_HDF5LibraryException__1getMajorErrorNumber(JNIEnv *e
      * Method:    _getMinorErrorNumber
      * Signature: ()J
      *
    - *  Extract the HDF-5 minor error number from the HDF-5 error stack.
    + *  Extract the HDF5 minor error number from the HDF5 error stack.
      */
     JNIEXPORT jlong JNICALL
     Java_hdf_hdf5lib_exceptions_HDF5LibraryException__1getMinorErrorNumber(JNIEnv *env, jobject obj)
    @@ -350,10 +350,10 @@ h5raiseException(JNIEnv *env, const char *message, const char *exception)
     } /* end h5raiseException() */
     
     /*
    - *  h5libraryError()   determines the HDF-5 major error code
    + *  h5libraryError()   determines the HDF5 major error code
      *  and creates and throws the appropriate sub-class of
      *  HDF5LibraryException().  This routine should be called
    - *  whenever a call to the HDF-5 library fails, i.e., when
    + *  whenever a call to the HDF5 library fails, i.e., when
      *  the return is -1.
      *
      *  Note:  This routine never returns from the 'throw',
    @@ -436,7 +436,7 @@ done:
     
     /*
      *  defineHDF5LibraryException()  returns the name of the sub-class
    - *  which goes with an HDF-5 error code.
    + *  which goes with an HDF5 error code.
      */
     static const char *
     defineHDF5LibraryException(hid_t maj_num)
    diff --git a/java/src/jni/exceptionImp.h b/java/src/jni/exceptionImp.h
    index 38469df..c7375e7 100644
    --- a/java/src/jni/exceptionImp.h
    +++ b/java/src/jni/exceptionImp.h
    @@ -41,7 +41,7 @@ JNIEXPORT void JNICALL Java_hdf_hdf5lib_H5_H5error_1on(JNIEnv *env, jclass clss)
      * Method:    printStackTrace0
      * Signature: (Ljava/lang/Object;)V
      *
    - *  Call the HDF-5 library to print the HDF-5 error stack to 'file_name'.
    + *  Call the HDF5 library to print the HDF5 error stack to 'file_name'.
      */
     JNIEXPORT void JNICALL Java_hdf_hdf5lib_exceptions_HDF5LibraryException_printStackTrace0(JNIEnv *env,
                                                                                              jobject obj,
    diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c
    index bdffdbd..0eff43e 100644
    --- a/java/src/jni/h5Constants.c
    +++ b/java/src/jni/h5Constants.c
    @@ -1210,6 +1210,11 @@ Java_hdf_hdf5lib_HDF5Constants_H5ES_1STATUS_1FAIL(JNIEnv *env, jclass cls)
     {
         return H5ES_STATUS_FAIL;
     }
    +JNIEXPORT jlong JNICALL
    +Java_hdf_hdf5lib_HDF5Constants_H5ES_1STATUS_1CANCELED(JNIEnv *env, jclass cls)
    +{
    +    return H5ES_STATUS_CANCELED;
    +}
     
     /* Java does not have unsigned native types */
     H5_GCC_CLANG_DIAG_OFF("sign-conversion")
    diff --git a/java/test/TestH5.java b/java/test/TestH5.java
    index b5bc3c1..062ea54 100644
    --- a/java/test/TestH5.java
    +++ b/java/test/TestH5.java
    @@ -313,7 +313,7 @@ public class TestH5 {
         @Test
         public void testH5get_libversion()
         {
    -        int libversion[] = {1, 13, 2};
    +        int libversion[] = {1, 13, 3};
     
             try {
                 H5.H5get_libversion(libversion);
    diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
    index ded569b..694c191 100644
    --- a/release_docs/RELEASE.txt
    +++ b/release_docs/RELEASE.txt
    @@ -105,7 +105,11 @@ New Features
     
         Documentation:
         --------------
    -    -
    +    - Doxygen User Guide documentation is available when configured and generated.
    +      The resulting documentation files will be in the share/html subdirectory
    +      of the HDF5 install directory.
    +
    +        (ADB - 2022/08/09)
     
     
     Support for new platforms, languages and compilers
    diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
    index 0e03574..f9da6f6 100644
    --- a/src/H5ACpublic.h
    +++ b/src/H5ACpublic.h
    @@ -563,7 +563,7 @@ typedef struct H5AC_cache_config_t {
          * The value must lie in the interval [0.0, 1.0]. 0.01 is a good place to
          * start in the serial case. In the parallel case, a larger value is needed
          * -- see the overview of the metadata cache in the
    -     * “Metadata Caching in HDF5” section of the -- HDF5 User’s Guide
    +     * “Metadata Caching in HDF5” section of the -- \ref UG
          * for details. */
     
         size_t max_size;
    diff --git a/src/H5Amodule.h b/src/H5Amodule.h
    index 3586414..7f88a22 100644
    --- a/src/H5Amodule.h
    +++ b/src/H5Amodule.h
    @@ -28,30 +28,92 @@
     #define H5_MY_PKG     H5A
     #define H5_MY_PKG_ERR H5E_ATTR
     
    -/**\defgroup H5A H5A
    +/** \page H5A_UG HDF5 Attributes
      *
    - * Use the functions in this module to manage HDF5 attributes.
    + * \section sec_attribute HDF5 Attributes
      *
    - * Like HDF5 datasets, HDF5 attributes are array variables which have an element
    - * datatype and a shape (dataspace). However, they perform a different function:
    - * Attributes decorate other HDF5 objects, and are typically used to
    - * represent application metadata. Unlike datasets, the HDF5 library does not
    - * support partial I/O operations for attributes and they cannot be compressed
    - * or extended.
    + * An HDF5 attribute is a small metadata object describing the nature and/or intended usage of a primary data
    + * object. A primary data object may be a dataset, group, or committed datatype.
      *
    + * \subsection subsec_attribute_intro Introduction
    + *
    + * Attributes are assumed to be very small as data objects go, so storing them as standard HDF5 datasets would
    + * be quite inefficient. HDF5 attributes are therefore managed through a special attributes interface,
    + * \ref H5A, which is designed to easily attach attributes to primary data objects as small datasets
    + * containing metadata information and to minimize storage requirements.
    + *
    + * Consider, as examples of the simplest case, a set of laboratory readings taken under known temperature and
    + * pressure conditions of 18.0 degrees Celsius and 0.5 atmospheres, respectively. The temperature and pressure
    + * stored as attributes of the dataset could be described as the following name/value pairs:
    + *     \li temp=18.0
    + *     \li pressure=0.5
    + *
    + * While HDF5 attributes are not standard HDF5 datasets, they have much in common:
    + * \li An attribute has a user-defined dataspace and the included metadata has a user-assigned datatype
    + * \li Metadata can be of any valid HDF5 datatype
    + * \li Attributes are addressed by name
    + *
    + * But there are some very important differences:
    + * \li There is no provision for special storage such as compression or chunking
    + * \li There is no partial I/O or sub-setting capability for attribute data
    + * \li Attributes cannot be shared
    + * \li Attributes cannot have attributes
    + * \li Being small, an attribute is stored in the object header of the object it describes and is thus
    + * attached directly to that object
    + *
    + * \subsection subsec_error_H5A Attribute Function Summaries
    + * @see H5A reference manual
    + *
    + * \subsection subsec_attribute_program Programming Model for Attributes
    + *
    + * The figure below shows the UML model for an HDF5 attribute and its associated dataspace and datatype.
      * 
    - * 
    + * 
    + * 
    + * 
    + * 
    CreateRead
    + * \image html UML_Attribute.jpg "The UML model for an HDF5 attribute" + *
    + * + * Creating an attribute is similar to creating a dataset. To create an attribute, the application must + * specify the object to which the attribute is attached, the datatype and dataspace of the attribute + * data, and the attribute creation property list. + * + * The following steps are required to create and write an HDF5 attribute: + * \li Obtain the object identifier for the attribute’s primary data object + * \li Define the characteristics of the attribute and specify the attribute creation property list + *
    • Define the datatype
    • + *
    • Define the dataspace
    • + *
    • Specify the attribute creation property list
    + * \li Create the attribute + * \li Write the attribute data (optional) + * \li Close the attribute (and datatype, dataspace, and attribute creation property list, if necessary) + * \li Close the primary data object (if appropriate) + * + * The following steps are required to open and read/write an existing attribute. Since HDF5 attributes + * allow no partial I/O, you need specify only the attribute and the attribute’s memory datatype to read it: + * \li Obtain the object identifier for the attribute’s primary data object + * \li Obtain the attribute’s name or index + * \li Open the attribute + * \li Get attribute dataspace and datatype (optional) + * \li Specify the attribute’s memory type + * \li Read and/or write the attribute data + * \li Close the attribute + * \li Close the primary data object (if appropriate) + * + * + * * * * - * + * * * * *
    CreateUpdate
    * \snippet{lineno} H5A_examples.c create * - * \snippet{lineno} H5A_examples.c read + * \snippet{lineno} H5A_examples.c update *
    UpdateDelete
    ReadDelete
    - * \snippet{lineno} H5A_examples.c update + * \snippet{lineno} H5A_examples.c read * * \snippet{lineno} H5A_examples.c delete @@ -59,6 +121,266 @@ *
    * + * \subsection subsec_attribute_work Working with Attributes + * + * \subsubsection subsubsec_attribute_work_struct The Structure of an Attribute + * + * An attribute has two parts: name and value(s). + * + * HDF5 attributes are sometimes discussed as name/value pairs in the form name=value. + * + * An attribute’s name is a null-terminated ASCII or UTF-8 character string. Each attribute attached to an + * object has a unique name. + * + * The value portion of the attribute contains one or more data elements of the same datatype. + * + * HDF5 attributes have all the characteristics of HDF5 datasets except that there is no partial I/O + * capability. In other words, attributes can be written and read only in full with no sub-setting. + * + * \subsubsection subsubsec_attribute_work_create Creating, Writing, and Reading Attributes + * + * If attributes are used in an HDF5 file, these functions will be employed: \ref H5Acreate, \ref H5Awrite, + * and \ref H5Aread. \ref H5Acreate and \ref H5Awrite are used together to place the attribute in the file. If + * an attribute is to be used and is not currently in memory, \ref H5Aread generally comes into play + * usually in concert with one each of the H5Aget_* and H5Aopen_* functions. + * + * To create an attribute, call H5Acreate: + * \code + * hid_t H5Acreate (hid_t loc_id, const char *name, + * hid_t type_id, hid_t space_id, hid_t create_plist, + * hid_t access_plist) + * \endcode + * loc_id identifies the object (dataset, group, or committed datatype) to which the attribute is to be + * attached. name, type_id, space_id, and create_plist convey, respectively, the attribute’s name, datatype, + * dataspace, and attribute creation property list. The attribute’s name must be locally unique: it must be + * unique within the context of the object to which it is attached. + * + * \ref H5Acreate creates the attribute in memory. The attribute does not exist in the file until + * \ref H5Awrite writes it there. 
+ * + * To write or read an attribute, call H5Awrite or H5Aread, respectively: + * \code + * herr_t H5Awrite (hid_t attr_id, hid_t mem_type_id, const void *buf) + * herr_t H5Aread (hid_t attr_id, hid_t mem_type_id, void *buf) + * \endcode + * attr_id identifies the attribute while mem_type_id identifies the in-memory datatype of the attribute data. + * + * \ref H5Awrite writes the attribute data from the buffer buf to the file. \ref H5Aread reads attribute data + * from the file into buf. + * + * The HDF5 Library converts the metadata between the in-memory datatype, mem_type_id, and the in-file + * datatype, defined when the attribute was created, without user intervention. + * + * \subsubsection subsubsec_attribute_work_access Accessing Attributes by Name or Index + * + * Attributes can be accessed by name or index value. The use of an index value makes it possible to iterate + * through all of the attributes associated with a given object. + * + * To access an attribute by its name, use the \ref H5Aopen_by_name function. \ref H5Aopen_by_name returns an + * attribute identifier that can then be used by any function that must access an attribute such as \ref + * H5Aread. Use the function \ref H5Aget_name to determine an attribute’s name. + * + * To access an attribute by its index value, use the \ref H5Aopen_by_idx function. To determine an attribute + * index value when it is not already known, use the H5Oget_info function. \ref H5Aopen_by_idx is generally + * used in the course of opening several attributes for later access. Use \ref H5Aiterate if the intent is to + * perform the same operation on every attribute attached to an object. 
+ * + * \subsubsection subsubsec_attribute_work_info Obtaining Information Regarding an Object’s Attributes + * + * In the course of working with HDF5 attributes, one may need to obtain any of several pieces of information: + * \li An attribute name + * \li The dataspace of an attribute + * \li The datatype of an attribute + * \li The number of attributes attached to an object + * + * To obtain an attribute’s name, call H5Aget_name with an attribute identifier, attr_id: + * \code + * ssize_t H5Aget_name (hid_t attr_id, size_t buf_size, char *buf) + * \endcode + * As with other attribute functions, attr_id identifies the attribute; buf_size defines the size of the + * buffer; and buf is the buffer to which the attribute’s name will be read. + * + * If the length of the attribute name, and hence the value required for buf_size, is unknown, a first call + * to \ref H5Aget_name will return that size. If the value of buf_size used in that first call is too small, + * the name will simply be truncated in buf. A second \ref H5Aget_name call can then be used to retrieve the + * name in an appropriately-sized buffer. + * + * To determine the dataspace or datatype of an attribute, call \ref H5Aget_space or \ref H5Aget_type, + * respectively: \code hid_t H5Aget_space (hid_t attr_id) hid_t H5Aget_type (hid_t attr_id) \endcode \ref + * H5Aget_space returns the dataspace identifier for the attribute attr_id. \ref H5Aget_type returns the + * datatype identifier for the attribute attr_id. + * + * To determine the number of attributes attached to an object, use the \ref H5Oget_info function. The + * function signature is below. \code herr_t H5Oget_info( hid_t object_id, H5O_info_t *object_info ) \endcode + * The number of attributes will be returned in the object_info buffer. This is generally the preferred first + * step in determining attribute index values. If the call returns N, the attributes attached to the object + * object_id have index values of 0 through N-1. 
+ * + * \subsubsection subsubsec_attribute_work_iterate Iterating across an Object’s Attributes + * + * It is sometimes useful to be able to perform the identical operation across all of the attributes attached + * to an object. At the simplest level, you might just want to open each attribute. At a higher level, you + * might wish to perform a rather complex operation on each attribute as you iterate across the set. + * + * To iterate an operation across the attributes attached to an object, one must make a series of calls to + * \ref H5Aiterate + * \code + * herr_t H5Aiterate (hid_t obj_id, H5_index_t index_type, + * H5_iter_order_t order, hsize_t *n, H5A_operator2_t op, + * void *op_data) + * \endcode + * \ref H5Aiterate successively marches across all of the attributes attached to the object specified in + * loc_id, performing the operation(s) specified in op_func with the data specified in op_data on each + * attribute. + * + * When \ref H5Aiterate is called, index contains the index of the attribute to be accessed in this call. When + * \ref H5Aiterate returns, index will contain the index of the next attribute. If the returned index is the + * null pointer, then all attributes have been processed, and the iterative process is complete. + * + * op_func is a user-defined operation that adheres to the \ref H5A_operator_t prototype. This prototype and + * certain requirements imposed on the operator’s behavior are described in the \ref H5Aiterate entry in the + * \ref RM. + * + * op_data is also user-defined to meet the requirements of op_func. Beyond providing a parameter with which + * to pass this data, HDF5 provides no tools for its management and imposes no restrictions. + * + * \subsubsection subsubsec_attribute_work_delete Deleting an Attribute + * + * Once an attribute has outlived its usefulness or is no longer appropriate, it may become necessary to + * delete it. 
+ * + * To delete an attribute, call \ref H5Adelete + * \code + * herr_t H5Adelete (hid_t loc_id, const char *name) + * \endcode + * \ref H5Adelete removes the attribute name from the group, dataset, or committed datatype specified in + * loc_id. + * + * \ref H5Adelete must not be called if there are any open attribute identifiers on the object loc_id. Such a + * call can cause the internal attribute indexes to change; future writes to an open attribute would then + * produce unintended results. + * + * \subsubsection subsubsec_attribute_work_close Closing an Attribute + * + * As is the case with all HDF5 objects, once access to an attribute it is no longer needed, that attribute + * must be closed. It is best practice to close it as soon as practicable; it is mandatory that it be closed + * prior to the H5close call closing the HDF5 Library. + * + * To close an attribute, call \ref H5Aclose + * \code + * herr_t H5Aclose (hid_t attr_id) + * \endcode + * \ref H5Aclose closes the specified attribute by terminating access to its identifier, attr_id. + * + * \subsection subsec_attribute_special Special Issues + * + * Some special issues for attributes are discussed below. + * + *

    Large Numbers of Attributes Stored in Dense Attribute Storage

    + * + * The dense attribute storage scheme was added in version 1.8 so that datasets, groups, and committed + * datatypes that have large numbers of attributes could be processed more quickly. + * + * Attributes start out being stored in an object's header. This is known as compact storage. For more + * information, see "Storage Strategies." + * + * As the number of attributes grows, attribute-related performance slows. To improve performance, dense + * attribute storage can be initiated with the H5Pset_attr_phase_change function. See the HDF5 Reference + * Manual for more information. + * + * When dense attribute storage is enabled, a threshold is defined for the number of attributes kept in + * compact storage. When the number is exceeded, the library moves all of the attributes into dense storage + * at another location. The library handles the movement of attributes and the pointers between the locations + * automatically. If some of the attributes are deleted so that the number falls below the threshold, then + * the attributes are moved back to compact storage by the library. + * + * The improvements in performance from using dense attribute storage are the result of holding attributes + * in a heap and indexing the heap with a B-tree. + * + * Note that there are some disadvantages to using dense attribute storage. One is that this is a new feature. + * Datasets, groups, and committed datatypes that use dense storage cannot be read by applications built with + * earlier versions of the library. Another disadvantage is that attributes in dense storage cannot be + * compressed. + * + *

    Large Attributes Stored in Dense Attribute Storage

    + * + * We generally consider the maximum size of an attribute to be 64K bytes. The library has two ways of storing + * attributes larger than 64K bytes: in dense attribute storage or in a separate dataset. Using dense + * attribute storage is described in this section, and storing in a separate dataset is described in the next + * section. + * + * To use dense attribute storage to store large attributes, set the number of attributes that will be stored + * in compact storage to 0 with the H5Pset_attr_phase_change function. This will force all attributes to be + * put into dense attribute storage and will avoid the 64KB size limitation for a single attribute in compact + * attribute storage. + * + * The example code below illustrates how to create a large attribute that will be kept in dense storage. + * + * + * + * + * + * + *
    Create
    + * \snippet{lineno} H5A_examples.c create + *
    + * + *

    Large Attributes Stored in a Separate Dataset

    + * + * In addition to dense attribute storage (see above), a large attribute can be stored in a separate dataset. + * In the figure below, DatasetA holds an attribute that is too large for the object header in Dataset1. By + * putting a pointer to DatasetA as an attribute in Dataset1, the attribute becomes available to those + * working with Dataset1. + * This way of handling large attributes can be used in situations where backward compatibility is important + * and where compression is important. Applications built with versions before 1.8.x can read large + * attributes stored in separate datasets. Datasets can be compressed while attributes cannot. + * + * + * + * + *
    + * \image html Shared_Attribute.jpg "A large or shared HDF5 attribute and its associated dataset(s)" + *
    + * Note: In the figure above, DatasetA is an attribute of Dataset1 that is too large to store in Dataset1's + * header. DatasetA is associated with Dataset1 by means of an object reference pointer attached as an + * attribute to Dataset1. The attribute in DatasetA can be shared among multiple datasets by means of + * additional object reference pointers attached to additional datasets. + * + *

    Shared Attributes

    + * + * Attributes written and managed through the \ref H5A interface cannot be shared. If shared attributes are + * required, they must be handled in the manner described above for large attributes and illustrated in + * the figure above. + * + *

    Attribute Names

    + * + * While any ASCII or UTF-8 character may be used in the name given to an attribute, it is usually wise + * to avoid the following kinds of characters: + * \li Commonly used separators or delimiters such as slash, backslash, colon, and semi-colon (\, /, :, ;) + * \li Escape characters + * \li Wild cards such as asterisk and question mark (*, ?) + * NULL can be used within a name, but HDF5 names are terminated with a NULL: whatever comes after the NULL + * will be ignored by HDF5. + * + * The use of ASCII or UTF-8 characters is determined by the character encoding property. See + * #H5Pset_char_encoding in the \ref RM. + * + *

    No Special I/O or Storage

    + * + * HDF5 attributes have all the characteristics of HDF5 datasets except the following: + * \li Attributes are written and read only in full: there is no provision for partial I/O or sub-setting + * \li No special storage capability is provided for attributes: there is no compression or chunking, and + * attributes are not extendable + * + * Previous Chapter \ref sec_dataspace - Next Chapter \ref sec_error + * + * \defgroup H5A Attributes (H5A) + * + * An HDF5 attribute is a small metadata object describing the nature and/or intended usage of a primary data + * object. A primary data object may be a dataset, group, or committed datatype. + * + * @see sec_attribute + * */ #endif /* H5Amodule_H */ diff --git a/src/H5Dmodule.h b/src/H5Dmodule.h index a05d717..474efd9 100644 --- a/src/H5Dmodule.h +++ b/src/H5Dmodule.h @@ -28,7 +28,2961 @@ #define H5_MY_PKG H5D #define H5_MY_PKG_ERR H5E_DATASET -/**\defgroup H5D H5D +/** \page H5D_UG HDF5 Datasets + * + * \section sec_dataset HDF5 Datasets + * + * \subsection subsec_dataset_intro Introduction + * + * An HDF5 dataset is an object composed of a collection of data elements, or raw data, and + * metadata that stores a description of the data elements, data layout, and all other information + * necessary to write, read, and interpret the stored data. From the viewpoint of the application the + * raw data is stored as a one-dimensional or multi-dimensional array of elements (the raw data), + * those elements can be any of several numerical or character types, small arrays, or even + * compound types similar to C structs. The dataset object may have attribute objects. See the + * figure below. + * + * + * + * + * + *
    + * \image html Dsets_fig1.gif "Application view of a dataset" + *
    + * + * A dataset object is stored in a file in two parts: a header and a data array. The header contains + * information that is needed to interpret the array portion of the dataset, as well as metadata (or + * pointers to metadata) that describes or annotates the dataset. Header information includes the + * name of the object, its dimensionality, its number-type, information about how the data itself is + * stored on disk (the storage layout), and other information used by the library to speed up access + * to the dataset or maintain the file’s integrity. + * + * The HDF5 dataset interface, comprising the @ref H5D functions, provides a mechanism for managing + * HDF5 datasets including the transfer of data between memory and disk and the description of + * dataset properties. + * + * A dataset is used by other HDF5 APIs, either by name or by an identifier. For more information, + * \see \ref api-compat-macros. + * + * \subsubsection subsubsec_dataset_intro_link Link/Unlink + * A dataset can be added to a group with one of the H5Lcreate calls, and deleted from a group with + * #H5Ldelete. The link and unlink operations use the name of an object, which may be a dataset. + * The dataset does not have to open to be linked or unlinked. + * + * \subsubsection subsubsec_dataset_intro_obj Object Reference + * A dataset may be the target of an object reference. The object reference is created by + * #H5Rcreate with the name of an object which may be a dataset and the reference type + * #H5R_OBJECT. The dataset does not have to be open to create a reference to it. + * + * An object reference may also refer to a region (selection) of a dataset. The reference is created + * with #H5Rcreate and a reference type of #H5R_DATASET_REGION. + * + * An object reference can be accessed by a call to #H5Rdereference. When the reference is to a + * dataset or dataset region, the #H5Rdereference call returns an identifier to the dataset just as if + * #H5Dopen has been called. 
+ * + * \subsubsection subsubsec_dataset_intro_attr Adding Attributes + * A dataset may have user-defined attributes which are created with #H5Acreate and accessed + * through the @ref H5A API. To create an attribute for a dataset, the dataset must be open, and the + * identifier is passed to #H5Acreate. The attributes of a dataset are discovered and opened using + * #H5Aopen_name, #H5Aopen_idx, or #H5Aiterate; these functions use the identifier of the dataset. + * An attribute can be deleted with #H5Adelete which also uses the identifier of the dataset. + * + * \subsection subsec_dataset_function Dataset Function Summaries + * Functions that can be used with datasets (@ref H5D functions) and property list functions that can + * used with datasets (@ref H5P functions) are listed below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Dataset functions
    FunctionPurpose
    #H5DcreateCreates a dataset at the specified location. The + * C function is a macro: \see \ref api-compat-macros.
    #H5Dcreate_anonCreates a dataset in a file without linking it into the file structure.
    #H5DopenOpens an existing dataset. The C function is a macro: \see \ref api-compat-macros.
    #H5DcloseCloses the specified dataset.
    #H5Dget_spaceReturns an identifier for a copy of the dataspace for a dataset.
    #H5Dget_space_statusDetermines whether space has been allocated for a dataset.
    #H5Dget_typeReturns an identifier for a copy of the datatype for a dataset.
    #H5Dget_create_plistReturns an identifier for a copy of the dataset creation property list for a dataset.
    #H5Dget_access_plistReturns the dataset access property list associated with a dataset.
    #H5Dget_offsetReturns the dataset address in a file.
    #H5Dget_storage_sizeReturns the amount of storage required for a dataset.
    #H5Dvlen_get_buf_sizeDetermines the number of bytes required to store variable-length (VL) data.
    #H5Dvlen_reclaimReclaims VL datatype memory buffers.
    #H5DreadReads raw data from a dataset into a buffer.
    #H5DwriteWrites raw data from a buffer to a dataset.
    #H5DiterateIterates over all selected elements in a dataspace.
    #H5DgatherGathers data from a selection within a memory buffer.
    #H5DscatterScatters data into a selection within a memory buffer.
    #H5DfillFills dataspace elements with a fill value in a memory buffer.
    #H5Dset_extentChanges the sizes of a dataset’s dimensions.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Dataset creation property list functions (H5P)
    FunctionPurpose
    #H5Pset_layoutSets the type of storage used to store the raw data for a dataset.
    #H5Pget_layoutReturns the layout of the raw data for a dataset.
    #H5Pset_chunkSets the size of the chunks used to store a chunked layout dataset.
    #H5Pget_chunkRetrieves the size of chunks for the raw data of a chunked layout dataset.
    #H5Pset_deflateSets compression method and compression level.
    #H5Pset_fill_valueSets the fill value for a dataset.
    #H5Pget_fill_valueRetrieves a dataset fill value.
    #H5Pfill_value_definedDetermines whether the fill value is defined.
    #H5Pset_fill_timeSets the time when fill values are written to a dataset.
    #H5Pget_fill_timeRetrieves the time when fill values are written to a dataset.
    #H5Pset_alloc_timeSets the timing for storage space allocation.
    #H5Pget_alloc_timeRetrieves the timing for storage space allocation.
    #H5Pset_filterAdds a filter to the filter pipeline.
    #H5Pall_filters_availVerifies that all required filters are available.
    #H5Pget_nfiltersReturns the number of filters in the pipeline.
    #H5Pget_filterReturns information about a filter in a pipeline. + * The C function is a macro: \see \ref api-compat-macros.
    #H5Pget_filter_by_idReturns information about the specified filter. + * The C function is a macro: \see \ref api-compat-macros.
    #H5Pmodify_filterModifies a filter in the filter pipeline.
    #H5Premove_filterDeletes one or more filters in the filter pipeline.
    #H5Pset_fletcher32Sets up use of the Fletcher32 checksum filter.
    #H5Pset_nbitSets up use of the n-bit filter.
    #H5Pset_scaleoffsetSets up use of the scale-offset filter.
    #H5Pset_shuffleSets up use of the shuffle filter.
    #H5Pset_szipSets up use of the Szip compression filter.
    #H5Pset_externalAdds an external file to the list of external files.
    #H5Pget_external_countReturns the number of external files for a dataset.
    #H5Pget_externalReturns information about an external file.
    #H5Pset_char_encodingSets the character encoding used to encode a string. Use to set ASCII or UTF-8 character + * encoding for object names.
    #H5Pget_char_encodingRetrieves the character encoding used to create a string.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Dataset access property list functions (H5P)
    FunctionPurpose
    #H5Pset_bufferSets type conversion and background buffers.
    #H5Pget_bufferReads buffer settings.
    #H5Pset_chunk_cacheSets the raw data chunk cache parameters.
    #H5Pget_chunk_cacheRetrieves the raw data chunk cache parameters.
    #H5Pset_edc_checkSets whether to enable error-detection when reading a dataset.
    #H5Pget_edc_checkDetermines whether error-detection is enabled for dataset reads.
    #H5Pset_filter_callbackSets user-defined filter callback function.
    #H5Pset_data_transformSets a data transform expression.
    #H5Pget_data_transformRetrieves a data transform expression.
    #H5Pset_type_conv_cbSets user-defined datatype conversion callback function.
    #H5Pget_type_conv_cbGets user-defined datatype conversion callback function.
    #H5Pset_hyper_vector_sizeSets number of I/O vectors to be read/written in hyperslab I/O.
    #H5Pget_hyper_vector_sizeRetrieves number of I/O vectors to be read/written in hyperslab I/O.
    #H5Pset_btree_ratiosSets B-tree split ratios for a dataset transfer property list.
    #H5Pget_btree_ratiosGets B-tree split ratios for a dataset transfer property list.
    #H5Pset_vlen_mem_managerSets the memory manager for variable-length datatype allocation in #H5Dread and + * #H5Dvlen_reclaim.
    #H5Pget_vlen_mem_managerGets the memory manager for variable-length datatype allocation in #H5Dread and + * #H5Dvlen_reclaim.
    #H5Pset_dxpl_mpioSets data transfer mode.
    #H5Pget_dxpl_mpioReturns the data transfer mode.
    #H5Pset_dxpl_mpio_chunk_optSets a flag specifying linked-chunk I/O or multi-chunk I/O.
    #H5Pset_dxpl_mpio_chunk_opt_numSets a numeric threshold for linked-chunk I/O.
    #H5Pset_dxpl_mpio_chunk_opt_ratioSets a ratio threshold for collective I/O.
    #H5Pset_dxpl_mpio_collective_optSets a flag governing the use of independent versus collective I/O.
    #H5Pset_multi_typeSets the type of data property for the MULTI driver.
    #H5Pget_multi_typeRetrieves the type of data property for the MULTI driver.
    #H5Pset_small_data_block_sizeSets the size of a contiguous block reserved for small data.
    #H5Pget_small_data_block_sizeRetrieves the current small data block size setting.
    + * + * \subsection subsec_dataset_program Programming Model for Datasets + * This section explains the programming model for datasets. + * + * \subsubsection subsubsec_dataset_program_general General Model + * + * The programming model for using a dataset has three main phases: + * \li Obtain access to the dataset + * \li Operate on the dataset using the dataset identifier returned at access + * \li Release the dataset + * + * These three phases or steps are described in more detail below the figure. + * + * A dataset may be opened several times and operations performed with several different + * identifiers to the same dataset. All the operations affect the dataset although the calling program + * must synchronize if necessary to serialize accesses. + * + * Note that the dataset remains open until every identifier is closed. The figure below shows the + * basic sequence of operations. + * + * + * + * + * + *
    + * \image html Dsets_fig2.gif "Dataset programming sequence" + *
    + * + * Creation and data access operations may have optional parameters which are set with property + * lists. The general programming model is: + * \li Create property list of appropriate class (dataset create, dataset transfer) + * \li Set properties as needed; each type of property has its own format and datatype + * \li Pass the property list as a parameter of the API call + * + * The steps below describe the programming phases or steps for using a dataset. + *

    Step 1. Obtain Access

    + * A new dataset is created by a call to #H5Dcreate. If successful, the call returns an identifier for the + * newly created dataset. + * + * Access to an existing dataset is obtained by a call to #H5Dopen. This call returns an identifier for + * the existing dataset. + * + * An object reference may be dereferenced to obtain an identifier to the dataset it points to. + * + * In each of these cases, the successful call returns an identifier to the dataset. The identifier is + * used in subsequent operations until the dataset is closed. + * + *

    Step 2. Operate on the Dataset

    + * The dataset identifier can be used to write and read data to the dataset, to query and set + * properties, and to perform other operations such as adding attributes, linking in groups, and + * creating references. + * + * The dataset identifier can be used for any number of operations until the dataset is closed. + * + *

    Step 3. Close the Dataset

    + * When all operations are completed, the dataset identifier should be closed with a call to + * #H5Dclose. This releases the dataset. + * + * After the identifier is closed, it cannot be used for further operations. + * + * \subsubsection subsubsec_dataset_program_create Create Dataset + * + * A dataset is created and initialized with a call to #H5Dcreate. The dataset create operation sets + * permanent properties of the dataset: + * \li Name + * \li Dataspace + * \li Datatype + * \li Storage properties + * + * These properties cannot be changed for the life of the dataset, although the dataspace may be + * expanded up to its maximum dimensions. + * + *

    Name

    + * A dataset name is a sequence of alphanumeric ASCII characters. The full name would include a + * tracing of the group hierarchy from the root group of the file. An example is + * /rootGroup/groupA/subgroup23/dataset1. The local name or relative name within the lowest- + * level group containing the dataset would include none of the group hierarchy. An example is + * Dataset1. + * + *

    Dataspace

    + * The dataspace of a dataset defines the number of dimensions and the size of each dimension. The + * dataspace defines the number of dimensions, and the maximum dimension sizes and current size + * of each dimension. The maximum dimension size can be a fixed value or the constant + * #H5S_UNLIMITED, in which case the actual dimension size can be changed with calls to + * #H5Dset_extent, up to the maximum set with the maxdims parameter in the #H5Screate_simple + * call that established the dataset’s original dimensions. The maximum dimension size is set when + * the dataset is created and cannot be changed. + * + *

    Datatype

    + * Raw data has a datatype which describes the layout of the raw data stored in the file. The + * datatype is set when the dataset is created and can never be changed. When data is transferred to + * and from the dataset, the HDF5 library will assure that the data is transformed to and from the + * stored format. + * + *

    Storage Properties

    + * Storage properties of the dataset are set when it is created. The required inputs table below shows + * the categories of storage properties. The storage properties cannot be changed after the dataset is + * created. + * + *

    Filters

    + * When a dataset is created, optional filters are specified. The filters are added to the data transfer + * pipeline when data is read or written. The standard library includes filters to implement + * compression, data shuffling, and error detection code. Additional user-defined filters may also be + * used. + * + * The required filters are stored as part of the dataset, and the list may not be changed after the + * dataset is created. The HDF5 library automatically applies the filters whenever data is + * transferred. + * + *

    Summary

    + * + * A newly created dataset has no attributes and no data values. The dimensions, datatype, storage + * properties, and selected filters are set. The table below lists the required inputs, and the second + * table below lists the optional inputs. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Required inputs
    Required InputsDescription
    DataspaceThe shape of the array.
    DatatypeThe layout of the stored elements.
    NameThe name of the dataset in the group.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Optional inputs
    Optional InputsDescription
    Storage LayoutHow the data is organized in the file including chunking.
    Fill ValueThe behavior and value for uninitialized data.
    External StorageOption to store the raw data in an external file.
    FiltersSelect optional filters to be applied. One of the filters that might be applied is compression.
    + * + *

    Example

    + * To create a new dataset, go through the following general steps: + * \li Set dataset characteristics (optional where default settings are acceptable) + * \li Datatype + * \li Dataspace + * \li Dataset creation property list + * \li Create the dataset + * \li Close the datatype, dataspace, and property list (as necessary) + * \li Close the dataset + * + * Example 1 below shows example code to create an empty dataset. The dataspace is 7 x 8, and the + * datatype is a little-endian integer. The dataset is created with the name “dset” and is a member of + * the root group, “/”. + * + * Example 1. Create an empty dataset + * \code + * hid_t dataset, datatype, dataspace; + * + * // Create dataspace: Describe the size of the array and create the dataspace for fixed-size dataset. + * dimsf[0] = 7; + * dimsf[1] = 8; + * dataspace = H5Screate_simple(2, dimsf, NULL); + * + * // Define datatype for the data in the file. + * // For this example, store little-endian integer numbers. + * datatype = H5Tcopy(H5T_NATIVE_INT); + * status = H5Tset_order(datatype, H5T_ORDER_LE); + * + * // Create a new dataset within the file using defined + * // dataspace and datatype. No properties are set. + * dataset = H5Dcreate(file, "/dset", datatype, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * H5Dclose(dataset); + * H5Sclose(dataspace); + * H5Tclose(datatype); + * \endcode + * + * Example 2, below, shows example code to create a similar dataset with a fill value of ‘-1’. This + * code has the same steps as in the example above, but uses a non-default property list. A dataset + * creation property list is created, and then the fill value is set to the desired value. Then the + * property list is passed to the #H5Dcreate call. + * + * Example 2. 
Create a dataset with fill value set + * \code + * hid_t plist; // property list + * hid_t dataset, datatype, dataspace; + * int fillval = -1; + * + * dimsf[0] = 7; + * dimsf[1] = 8; + * dataspace = H5Screate_simple(2, dimsf, NULL); + * datatype = H5Tcopy(H5T_NATIVE_INT); + * status = H5Tset_order(datatype, H5T_ORDER_LE); + * + * // Example of Dataset Creation property list: set fill value to '-1' + * plist = H5Pcreate(H5P_DATASET_CREATE); + * status = H5Pset_fill_value(plist, datatype, &fillval); + * + * // Same as above, but use the property list + * dataset = H5Dcreate(file, "/dset", datatype, dataspace, H5P_DEFAULT, plist, H5P_DEFAULT); + * H5Dclose(dataset); + * H5Sclose(dataspace); + * H5Tclose(datatype); + * H5Pclose(plist); + * \endcode + * + * After this code is executed, the dataset has been created and written to the file. The data array is + * uninitialized. Depending on the storage strategy and fill value options that have been selected, + * some or all of the space may be allocated in the file, and fill values may be written in the file. + * + * \subsubsection subsubsec_dataset_program_transfer Data Transfer Operations on a Dataset + * Data is transferred between memory and the raw data array of the dataset through #H5Dwrite and + * #H5Dread operations. A data transfer has the following basic steps: + * \li 1. Allocate and initialize memory space as needed + * \li 2. Define the datatype of the memory elements + * \li 3. Define the elements to be transferred (a selection, or all the elements) + * \li 4. Set data transfer properties (including parameters for filters or file drivers) as needed + * \li 5. Call the @ref H5D API + * + * Note that the location of the data in the file, the datatype of the data in the file, the storage + * properties, and the filters do not need to be specified because these are stored as a permanent part + * of the dataset. 
A selection of elements from the dataspace is specified; the selected elements may + * be the whole dataspace. + * + * The following figure shows a diagram of a write operation which + * transfers a data array from memory to a dataset in the file (usually on disk). A read operation has + * similar parameters with the data flowing the other direction. + * + * + * + * + * + *
    + * \image html Dsets_fig3.gif "A write operation" + *
    + * + *

    Memory Space

    + * The calling program must allocate sufficient memory to store the data elements to be transferred. + * For a write (from memory to the file), the memory must be initialized with the data to be written + * to the file. For a read, the memory must be large enough to store the elements that will be read. + * The amount of storage needed can be computed from the memory datatype (which defines the + * size of each data element) and the number of elements in the selection. + * + *

    Memory Datatype

    + * The memory layout of a single data element is specified by the memory datatype. This specifies + * the size, alignment, and byte order of the element as well as the datatype class. Note that the + * memory datatype must be the same datatype class as the file, but may have different byte order + * and other properties. The HDF5 Library automatically transforms data elements between the + * source and destination layouts. For more information, \ref sec_datatype. + * + * For a write, the memory datatype defines the layout of the data to be written; an example is IEEE + * floating-point numbers in native byte order. If the file datatype (defined when the dataset is + * created) is different but compatible, the HDF5 Library will transform each data element when it + * is written. For example, if the file byte order is different than the native byte order, the HDF5 + * library will swap the bytes. + * + * For a read, the memory datatype defines the desired layout of the data to be read. This must be + * compatible with the file datatype, but should generally use native formats such as byte orders. + * The HDF5 library will transform each data element as it is read. + * + *

    Selection

    + * The data transfer will transfer some or all of the elements of the dataset depending on the + * dataspace selection. The selection has two dataspace objects: one for the source, and one for the + * destination. These objects describe which elements of the dataspace to be transferred. Some + * (partial I/O) or all of the data may be transferred. Partial I/O is defined by defining hyperslabs or + * lists of elements in a dataspace object. + * + * The dataspace selection for the source defines the indices of the elements to be read or written. + * The two selections must define the same number of points, but the order and layout may be + * different. The HDF5 Library automatically selects and distributes the elements according to the + * selections. It might, for example, perform a scatter-gather or sub-set of the data. + * + *

    Data Transfer Properties

    + * For some data transfers, additional parameters should be set using the transfer property list. The + * table below lists the categories of transfer properties. These properties set parameters for the + * HDF5 Library and may be used to pass parameters for optional filters and file drivers. For + * example, transfer properties are used to select independent or collective operation when using + * MPI-I/O. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Categories of transfer properties
    PropertiesDescription
    Library parametersInternal caches, buffers, B-Trees, etc.
    Memory managementVariable-length memory management, data overwrite
    File driver managementParameters for file drivers
    Filter managementParameters for filters
    + * + *

    Data Transfer Operation (Read or Write)

    + * The data transfer is done by calling #H5Dread or #H5Dwrite with the parameters described above. + * The HDF5 Library constructs the required pipeline, which will scatter-gather, transform + * datatypes, apply the requested filters, and use the correct file driver. + * + * During the data transfer, the transformations and filters are applied to each element of the data in + * the required order until all the data is transferred. + * + *

    Summary

    + * To perform a data transfer, it is necessary to allocate and initialize memory, describe the source + * and destination, set required and optional transfer properties, and call the \ref H5D API. + * + *

    Examples

    + * The basic procedure to write to a dataset is the following: + * \li Open the dataset. + * \li Set the dataset dataspace for the write (optional if dataspace is #H5S_ALL). + * \li Write data. + * \li Close the datatype, dataspace, and property list (as necessary). + * \li Close the dataset. + * + * Example 3 below shows example code to write a 4 x 6 array of integers. In the example, the data + * is initialized in the memory array dset_data. The dataset has already been created in the file, so it + * is opened with H5Dopen. + * + * The data is written with #H5Dwrite. The arguments are the dataset identifier, the memory + * datatype (#H5T_NATIVE_INT), the memory and file selections (#H5S_ALL in this case: the + * whole array), and the default (empty) property list. The last argument is the data to be + * transferred. + * + * Example 3. Write an array of integers + * \code + * hid_t file_id, dataset_id; // identifiers + * herr_t status; + * int i, j, dset_data[4][6]; + * + * // Initialize the dataset. + * for (i = 0; i < 4; i++) + * for (j = 0; j < 6; j++) + * dset_data[i][j] = i * 6 + j + 1; + * + * // Open an existing file. + * file_id = H5Fopen("dset.h5", H5F_ACC_RDWR, H5P_DEFAULT); + * + * // Open an existing dataset. + * dataset_id = H5Dopen(file_id, "/dset", H5P_DEFAULT); + * + * // Write the entire dataset, using 'dset_data': memory type is 'native int' + * // write the entire dataspace to the entire dataspace, no transfer properties + * status = H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset_data); + * + * status = H5Dclose(dataset_id); + * \endcode + * + * Example 4 below shows a similar write except for setting a non-default value for the transfer + * buffer. The code is the same as Example 3, but a transfer property list is created, and the desired + * buffer size is set. The #H5Dwrite function has the same arguments, but uses the property list to set + * the buffer. + * + * Example 4. 
Write an array using a property list + * \code + * hid_t file_id, dataset_id; + * hid_t xferplist; + * herr_t status; + * int i, j, dset_data[4][6]; + * + * file_id = H5Fopen("dset.h5", H5F_ACC_RDWR, H5P_DEFAULT); + * dataset_id = H5Dopen(file_id, "/dset", H5P_DEFAULT); + * + * // Example: set type conversion buffer to 64MB + * xferplist = H5Pcreate(H5P_DATASET_XFER); + * status = H5Pset_buffer( xferplist, 64 * 1024 *1024, NULL, NULL); + * + * // Write the entire dataset, using 'dset_data': memory type is 'native int' + * // write the entire dataspace to the entire dataspace, set the buffer size with the property list + * status = H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xferplist, dset_data); + * + * status = H5Dclose(dataset_id); + * \endcode + * + * The basic procedure to read from a dataset is the following: + * \li Define the memory dataspace of the read (optional if dataspace is #H5S_ALL). + * \li Open the dataset. + * \li Get the dataset dataspace (if using #H5S_ALL above). + * + * Else define dataset dataspace of read. + * \li Define the memory datatype (optional). + * \li Define the memory buffer. + * \li Read data. + * \li Close the datatype, dataspace, and property list (as necessary). + * \li Close the dataset. + * + * The example below shows code that reads a 4 x 6 array of integers from a dataset called “dset”. + * First, the dataset is opened. The #H5Dread call has parameters: + * \li The dataset identifier (from #H5Dopen) + * \li The memory datatype (#H5T_NATIVE_INT) + * \li The memory and file dataspace (#H5S_ALL, the whole array) + * \li A default (empty) property list + * \li The memory to be filled + * + * Example 5. Read an array from a dataset + * \code + * hid_t file_id, dataset_id; + * herr_t status; + * int i, j, dset_data[4][6]; + * + * // Open an existing file. + * file_id = H5Fopen("dset.h5", H5F_ACC_RDWR, H5P_DEFAULT); + * + * // Open an existing dataset. 
+ * dataset_id = H5Dopen(file_id, "/dset", H5P_DEFAULT); + * + * // read the entire dataset, into 'dset_data': memory type is 'native int' + * // read the entire dataspace to the entire dataspace, no transfer properties, + * status = H5Dread(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset_data); + * + * status = H5Dclose(dataset_id); + * \endcode + * + * \subsubsection subsubsec_dataset_program_read Retrieve the Properties of a Dataset + * The functions listed below allow the user to retrieve information regarding a dataset including + * the datatype, the dataspace, the dataset creation property list, and the total stored size of the data. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Retrieve dataset information
    Query FunctionDescription
    #H5Dget_spaceRetrieve the dataspace of the dataset as stored in the file.
    #H5Dget_typeRetrieve the datatype of the dataset as stored in the file.
    #H5Dget_create_plistRetrieve the dataset creation properties.
    #H5Dget_storage_sizeRetrieve the total bytes for all the data of the dataset.
    #H5Dvlen_get_buf_sizeRetrieve the total bytes for all the variable-length data of the dataset.
    + * + * The example below illustrates how to retrieve dataset information. + * + * Example 6. Retrieve dataset + * \code + * hid_t file_id, dataset_id; + * hid_t dspace_id, dtype_id, plist_id; + * herr_t status; + * + * // Open an existing file. + * file_id = H5Fopen("dset.h5", H5F_ACC_RDWR, H5P_DEFAULT); + * + * // Open an existing dataset. + * dataset_id = H5Dopen(file_id, "/dset", H5P_DEFAULT); + * dspace_id = H5Dget_space(dataset_id); + * dtype_id = H5Dget_type(dataset_id); + * plist_id = H5Dget_create_plist(dataset_id); + * + * // use the objects to discover the properties of the dataset + * status = H5Dclose(dataset_id); + * \endcode + * + * \subsection subsec_dataset_transfer Data Transfer + * The HDF5 library implements data transfers through a pipeline which implements data + * transformations (according to the datatype and selections), chunking (as requested), and I/O + * operations using different mechanisms (file drivers). The pipeline is automatically configured by + * the HDF5 library. Metadata is stored in the file so that the correct pipeline can be constructed to + * retrieve the data. In addition, optional filters such as compression may be added to the standard + * pipeline. + * + * The figure below illustrates data layouts for different layers of an application using HDF5. The + * application data is organized as a multidimensional array of elements. The HDF5 format + * specification defines the stored layout of the data and metadata. The storage layout properties + * define the organization of the abstract data. This data is written to and read from some storage + * medium. + * + * + * + * + * + *
    + * \image html Dsets_fig4.gif "Data layouts in an application" + *
    + * + * The last stage of a write (and first stage of a read) is managed by an HDF5 file driver module. + * The virtual file layer of the HDF5 Library implements a standard interface to alternative I/O + * methods, including memory (AKA “core”) files, single serial file I/O, multiple file I/O, and + * parallel I/O. The file driver maps a simple abstract HDF5 file to the specific access methods. + * + * The raw data of an HDF5 dataset is conceived to be a multidimensional array of data elements. + * This array may be stored in the file according to several storage strategies: + * \li Contiguous + * \li Chunked + * \li Compact + * + * The storage strategy does not affect data access methods except that certain operations may be + * more or less efficient depending on the storage strategy and the access patterns. + * + * Overall, the data transfer operations (#H5Dread and #H5Dwrite) work identically for any storage + * method, for any file driver, and for any filters and transformations. The HDF5 library + * automatically manages the data transfer process. In some cases, transfer properties should or + * must be used to pass additional parameters such as MPI/IO directives when using the parallel file + * driver. + * + * \subsubsection subsubsec_dataset_transfer_pipe The Data Pipeline + * When data is written or read to or from an HDF5 file, the HDF5 library passes the data through a + * sequence of processing steps which are known as the HDF5 data pipeline. This data pipeline + * performs operations on the data in memory such as byte swapping, alignment, scatter-gather, and + * hyperslab selections. The HDF5 library automatically determines which operations are needed + * and manages the organization of memory operations such as extracting selected elements from a + * data block. The data pipeline modules operate on data buffers: each module processes a buffer + * and passes the transformed buffer to the next stage. 
+ * + * The table below lists the stages of the data pipeline. The figure below the table shows the order + * of processing during a read or write. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Stages of the data pipeline
    LayersDescription
    I/O initiationInitiation of HDF5 I/O activities (#H5Dwrite and #H5Dread) in a user’s application program.
    Memory hyperslab operationData is scattered to (for read), or gathered from (for write) the application’s memory buffer + * (bypassed if no datatype conversion is needed).
    Datatype conversionDatatype is converted if it is different between memory and storage (bypassed if no datatype + * conversion is needed).
    File hyperslab operationData is gathered from (for read), or scattered to (for write) file space in memory (bypassed + * if no datatype conversion is needed).
    Filter pipelineData is processed by filters when it passes. Data can be modified and restored here (bypassed + * if no datatype conversion is needed, no filter is enabled, or dataset is not chunked).
    Virtual File LayerFacilitates easy plug-in file drivers such as MPIO or POSIX I/O.
    Actual I/OActual file driver used by the library such as MPIO or STDIO.
    + * + * + * + * + * + *
    + * \image html Dsets_fig5.gif "The processing order in the data pipeline" + *
    + * + * The HDF5 library automatically applies the stages as needed. + * + * When the memory dataspace selection is other than the whole dataspace, the memory hyperslab + * stage scatters/gathers the data elements between the application memory (described by the + * selection) and a contiguous memory buffer for the pipeline. On a write, this is a gather operation; + * on a read, this is a scatter operation. + * + * When the memory datatype is different from the file datatype, the datatype conversion stage + * transforms each data element. For example, if data is written from 32-bit big-endian memory, + * and the file datatype is 32-bit little-endian, the datatype conversion stage will swap the bytes of + * every element. Similarly, when data is read from the file to native memory, byte swapping will + * be applied automatically when needed. + * + * The file hyperslab stage is similar to the memory hyperslab stage, but is managing the + * arrangement of the elements according to the dataspace selection. When data is read, data + * elements are gathered from the data blocks from the file to fill the contiguous buffers which are + * then processed by the pipeline. When data is written, the elements from a buffer are scattered to the + * data blocks of the file. + * + * \subsubsection subsubsec_dataset_transfer_filter Data Pipeline Filters + * In addition to the standard pipeline, optional stages, called filters, can be inserted in the pipeline. + * The standard distribution includes optional filters to implement compression and error checking. + * User applications may add custom filters as well. + * + * The HDF5 library distribution includes or employs several optional filters. These are listed in the + * table below. The filters are applied in the pipeline between the virtual file layer and the file + * hyperslab operation. See the figure above. The application can use any number of filters in any + * order. 
+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Data pipeline filters
    FilterDescription
    gzip compressionData compression using zlib.
    Szip compressionData compression using the Szip library. See The HDF Group website for more information + * regarding the Szip filter.
    N-bit compressionData compression using an algorithm specialized for n-bit datatypes.
    Scale-offset compressionData compression using a “scale and offset” algorithm.
    ShufflingTo improve compression performance, data is regrouped by its byte position in the data + * unit. In other words, the 1st, 2nd, 3rd, and 4th bytes of integers are stored together + * respectively.
    Fletcher32Fletcher32 checksum for error-detection.
    + * + * Filters may be used only for chunked data and are applied to chunks of data between the file + * hyperslab stage and the virtual file layer. At this stage in the pipeline, the data is organized as + * fixed-size blocks of elements, and the filter stage processes each chunk separately. + * + * Filters are selected by dataset creation properties, and some behavior may be controlled by data + * transfer properties. The library determines what filters must be applied and applies them in the + * order in which they were set by the application. That is, if an application calls + * #H5Pset_shuffle and then #H5Pset_deflate when creating a dataset’s creation property list, the + * library will apply the shuffle filter first and then the deflate filter. + * + * For more information, + * \li @see @ref subsubsec_dataset_filters_nbit + * \li @see @ref subsubsec_dataset_filters_scale + * + * \subsubsection subsubsec_dataset_transfer_drive File Drivers + * I/O is performed by the HDF5 virtual file layer. The file driver interface writes and reads blocks + * of data; each driver module implements the interface using different I/O mechanisms. The table + * below lists the file drivers currently supported. Note that the I/O mechanisms are separated from + * the pipeline processing: the pipeline and filter operations are identical no matter what data access + * mechanism is used. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    I/O file drivers
    File DriverDescription
    #H5FD_COREStore in memory (optional backing store to disk file).
    #H5FD_FAMILYStore in a set of files.
    #H5FD_LOGStore in logging file.
    #H5FD_MPIOStore using MPI/IO.
    #H5FD_MULTIStore in multiple files. There are several options to control layout.
    #H5FD_SEC2Serial I/O to file using Unix “section 2” functions.
    #H5FD_STDIOSerial I/O to file using Unix “stdio” functions.
    + * + * Each file driver writes/reads contiguous blocks of bytes from a logically contiguous address + * space. The file driver is responsible for managing the details of the different physical storage + * methods. + * + * In serial environments, everything above the virtual file layer tends to work identically no matter + * what storage method is used. + * + * Some options may have substantially different performance depending on the file driver that is + * used. In particular, multi-file and parallel I/O may perform considerably differently from serial + * drivers depending on chunking and other settings. + * + * \subsubsection subsubsec_dataset_transfer_props Data Transfer Properties to Manage the Pipeline + * Data transfer properties set optional parameters that control parts of the data pipeline. The + * function listing below shows transfer properties that control the behavior of the library. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Data transfer property list functions
    C FunctionPurpose
    #H5Pset_bufferMaximum size for the type conversion buffer and the background buffer. May also supply + * pointers to application-allocated buffers.
    #H5Pset_hyper_vector_sizeSets the number of "I/O vectors" (offset and length pairs) which are to be + * accumulated in memory before being issued to the lower levels + * of the library for reading or writing the actual data.
    #H5Pset_btree_ratiosSet the B-tree split ratios for a dataset transfer property list. The split ratios determine + * what percent of children go in the first node when a node splits.
    + * + * Some filters and file drivers require or use additional parameters from the application program. + * These can be passed in the data transfer property list. The table below shows file driver property + * list functions. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    File driver property list functions
    C FunctionPurpose
    #H5Pset_dxpl_mpioControl the MPI I/O transfer mode (independent or collective) during data I/O operations.
    #H5Pset_small_data_block_sizeReserves blocks of size bytes for the contiguous storage of the raw data portion of small + * datasets. The HDF5 Library then writes the raw data from small datasets to this reserved space + * which reduces unnecessary discontinuities within blocks of metadata and improves + * I/O performance.
    #H5Pset_edc_checkDisable/enable EDC checking for read. When selected, EDC is always written.
    + * + * The transfer properties are set in a property list which is passed as a parameter of the #H5Dread or + * #H5Dwrite call. The transfer properties are passed to each pipeline stage. Each stage may use or + * ignore any property in the list. In short, there is one property list that contains all the properties. + * + * \subsubsection subsubsec_dataset_transfer_store Storage Strategies + * The raw data is conceptually a multi-dimensional array of elements that is stored as a contiguous + * array of bytes. The data may be physically stored in the file in several ways. The table below lists + * the storage strategies for a dataset. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Dataset storage strategies
    Storage StrategyDescription
    ContiguousThe dataset is stored as one continuous array of bytes.
    Chunked The dataset is stored as fixed-size chunks.
    CompactA small dataset is stored in the metadata header.
    + * + * The different storage strategies do not affect the data transfer operations of the dataset: reads and + * writes work the same for any storage strategy. + * + * These strategies are described in the following sections. + * + *

    Contiguous

    + * A contiguous dataset is stored in the file as a header and a single continuous array of bytes. See + * the figure below. In the case of a multi-dimensional array, the data is serialized in row major order. By + * default, data is stored contiguously. + * + * + * + * + * + *
    + * \image html Dsets_fig6.gif "Contiguous data storage" + *
    + * + * Contiguous storage is the simplest model. It has several limitations. First, the dataset must be a + * fixed-size: it is not possible to extend the limit of the dataset or to have unlimited dimensions. In + * other words, if the number of dimensions of the array might change over time, then chunking + * storage must be used instead of contiguous. Second, because data is passed through the pipeline + * as fixed-size blocks, compression and other filters cannot be used with contiguous data. + * + *

    Chunked

    + * The data of a dataset may be stored as fixed-size chunks. A chunk is a hyper- + * rectangle of any shape. When a dataset is chunked, each chunk is read or written as a single I/O + * operation, and individually passed from stage to stage of the data pipeline. + * + * + * + * + * + *
    + * \image html Dsets_fig7.gif "Chunked data storage" + *
    + * + * Chunks may be any size and shape that fits in the dataspace of the dataset. For example, a three + * dimensional dataspace can be chunked as 3-D cubes, 2-D planes, or 1-D lines. The chunks may + * extend beyond the size of the dataspace. For example, a 3 x 3 dataset might be chunked in 2 x 2 + * chunks. Sufficient chunks will be allocated to store the array, and any extra space will not be + * accessible. So, to store the 3 x 3 array, four 2 x 2 chunks would be allocated with 7 unused + * elements stored. + * + * Chunked datasets can be unlimited in any direction and can be compressed or filtered. + * + * Since the data is read or written by chunks, chunking can have a dramatic effect on performance + * by optimizing what is read and written. Note, too, that for specific access patterns such as + * parallel I/O, decomposition into chunks can have a large impact on performance. + * + * Two restrictions have been placed on chunk shape and size: + *
    • The rank of a chunk must be less than or equal to the rank of the dataset
    • + *
    • Chunk size cannot exceed the size of a fixed-size dataset; for example, a dataset consisting of + * a 5 x 4 fixed-size array cannot be defined with 10 x 10 chunks
    + * + *

    Compact

    + * For contiguous and chunked storage, the dataset header information and data are stored in two + * (or more) blocks. Therefore, at least two I/O operations are required to access the data: one to + * access the header, and one (or more) to access data. For a small dataset, this is considerable + * overhead. + * + * A small dataset may be stored in a continuous array of bytes in the header block using the + * compact storage option. This dataset can be read entirely in one operation which retrieves the + * header and data. The dataset must fit in the header. This may vary depending on the metadata + * that is stored. In general, a compact dataset should be approximately 30 KB or less total size. + * + * + * + * + * + *
    + * \image html Dsets_fig8.gif "Compact data storage" + *
    + * + * \subsubsection subsubsec_dataset_transfer_partial Partial I/O Sub‐setting and Hyperslabs + * Data transfers can write or read some of the data elements of the dataset. This is controlled by + * specifying two selections: one for the source and one for the destination. Selections are specified + * by creating a dataspace with selections. + * + * Selections may be a union of hyperslabs or a list of points. A hyperslab is a contiguous hyper- + * rectangle from the dataspace. Selected fields of a compound datatype may be read or written. In + * this case, the selection is controlled by the memory and file datatypes. + * + * Summary of procedure: + * \li 1. Open the dataset + * \li 2. Define the memory datatype + * \li 3. Define the memory dataspace selection and file dataspace selection + * \li 4. Transfer data (#H5Dread or #H5Dwrite) + * + * For more information, + * @see @ref sec_dataspace + * + * \subsection subsec_dataset_allocation Allocation of Space in the File + * When a dataset is created, space is allocated in the file for its header and initial data. The amount + * of space allocated when the dataset is created depends on the storage properties. When the + * dataset is modified (data is written, attributes added, or other changes), additional storage may be + * allocated if necessary. + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Initial dataset size
    ObjectSize
    HeaderVariable, but typically around 256 bytes at the creation of a simple dataset with a simple + * datatype.
    DataSize of the data array (number of elements x size of element). Space allocated in + * the file depends on the storage strategy and the allocation strategy.
    + * + *

    Header

    + * A dataset header consists of one or more header messages containing persistent metadata + * describing various aspects of the dataset. These records are defined in the HDF5 File Format + * Specification. The amount of storage required for the metadata depends on the metadata to be + * stored. The table below summarizes the metadata. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Metadata storage sizes
    Header InformationApproximate Storage Size
    Datatype (required)Bytes or more. Depends on type.
    Dataspace (required)Bytes or more. Depends on number of dimensions and hsize_t.
    Layout (required)Points to the stored data. Bytes or more. Depends on hsize_t and number of dimensions.
    FiltersDepends on the number of filters. The size of the filter message depends on the name and + * data that will be passed.
    + * + * The header blocks also store the name and values of attributes, so the total storage depends on + * the number and size of the attributes. + * + * In addition, the dataset must have at least one link, including a name, which is stored in the file + * and in the group it is linked from. + * + * The different storage strategies determine when and how much space is allocated for the data + * array. See the discussion of fill values below for a detailed explanation of the storage allocation. + * + *

    Contiguous Storage

    + * For a continuous storage option, the data is stored in a single, contiguous block in the file. The + * data is nominally a fixed-size, (number of elements x size of element). The figure below shows + * an example of a two dimensional array stored as a contiguous dataset. + * + * Depending on the fill value properties, the space may be allocated when the dataset is created or + * when first written (default), and filled with fill values if specified. For parallel I/O, by default the + * space is allocated when the dataset is created. + * + * + * + * + * + *
    + * \image html Dsets_fig9.gif "A two dimensional array stored as a contiguous dataset" + *
    + * + *

    Chunked Storage

    + * For chunked storage, the data is stored in one or more chunks. Each chunk is a continuous block + * in the file, but chunks are not necessarily stored contiguously. Each chunk has the same size. The + * data array has the same nominal size as a contiguous array (number of elements x size of + * element), but the storage is allocated in chunks, so the total size in the file can be larger than the + * nominal size of the array. See the figure below. + * + * If a fill value is defined, each chunk will be filled with the fill value. Chunks must be allocated + * when data is written, but they may be allocated when the file is created, as the file expands, or + * when data is written. + * + * For serial I/O, by default chunks are allocated incrementally, as data is written to the chunk. For + * a sparse dataset, chunks are allocated only for the parts of the dataset that are written. In this + * case, if the dataset is extended, no storage is allocated. + * + * For parallel I/O, by default chunks are allocated when the dataset is created or extended with fill + * values written to the chunk. + * + * In either case, the default can be changed using fill value properties. For example, using serial + * I/O, the properties can select to allocate chunks when the dataset is created. + * + * + * + * + * + *
    + * \image html Dsets_fig10.gif "A two dimensional array stored in chunks" + *
    + * + *

    Changing Dataset Dimensions

    + * + * #H5Dset_extent is used to change the current dimensions of the dataset within the limits of the + * dataspace. Each dimension can be extended up to its maximum or unlimited. Extending the + * dataspace may or may not allocate space in the file and may or may not write fill values, if they + * are defined. See the example code below. + * + * The dimensions of the dataset can also be reduced. If the sizes specified are smaller than the + * dataset’s current dimension sizes, #H5Dset_extent will reduce the dataset’s dimension sizes to the + * specified values. It is the user’s responsibility to ensure that valuable data is not lost; + * #H5Dset_extent does not check. + * + * Using #H5Dset_extent to increase the size of a dataset + * \code + * hid_t file_id, dataset_id; + * herr_t status; + * hsize_t newdims[2]; + * + * // Open an existing file. + * file_id = H5Fopen("dset.h5", H5F_ACC_RDWR, H5P_DEFAULT); + * + * // Open an existing dataset. + * dataset_id = H5Dopen(file_id, "/dset", H5P_DEFAULT); + * + * // Example: dataset is 2 x 3, each dimension is UNLIMITED + * // extend to 2 x 7 + * newdims[0] = 2; + * newdims[1] = 7; + * status = H5Dset_extent(dataset_id, newdims); + * + * // dataset is now 2 x 7 + * + * status = H5Dclose(dataset_id); + * \endcode + * + * \subsubsection subsubsec_dataset_allocation_store Storage Allocation in the File: Early, Incremental, Late + * The HDF5 Library implements several strategies for when storage is allocated if and when it is + * filled with fill values for elements not yet written by the user. Different strategies are + * recommended for different storage layouts and file drivers. In particular, a parallel program + * needs storage allocated during a collective call (for example, create or extend), while serial + * programs may benefit from delaying the allocation until the data is written. 
+ * + * Two file creation properties control when to allocate space, when to write the fill value, and the + * actual fill value to write. + * + *

    When to Allocate Space

    + * The table below shows the options for when data is allocated in the file. Early allocation is done + * during the dataset create call. Certain file drivers (especially MPI-I/O and MPI-POSIX) require + * space to be allocated when a dataset is created, so all processors will have the correct view of the + * data. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    File storage allocation options
    StrategyDescription
    EarlyAllocate storage for the dataset immediately when the dataset is created.
    LateDefer allocating space for storing the dataset until the dataset is written.
    IncrementalDefer allocating space for storing each chunk until the chunk is written.
    DefaultUse the strategy (Early, Late, or Incremental) for the storage method and + * access method. This is the recommended strategy.
    + * + * Late allocation is done at the time of the first write to dataset. Space for the whole dataset is + * allocated at the first write. + * + * Incremental allocation (chunks only) is done at the time of the first write to the chunk. Chunks + * that have never been written are not allocated in the file. In a sparsely populated dataset, this + * option allocates chunks only where data is actually written. + * + * The “Default” property selects the option recommended as appropriate for the storage method + * and access method. The defaults are shown in the table below. Note that Early allocation is + * recommended for all Parallel I/O, while other options are recommended as the default for serial + * I/O cases. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Default storage options
    Storage TypeSerial I/OParallel I/O
    ContiguousLateEarly
    ChunkedIncrementalEarly
    CompactEarlyEarly
    + * + *

    When to Write the Fill Value

    + * The second property is when to write the fill value. The possible values are “Never” and + * “Allocation”. The table below shows these options. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    When to write fill values
    WhenDescription
    NeverFill value will never be written.
    AllocationFill value is written when space is allocated. (Default for chunked and contiguous + * data storage.)
    + * + *

    What Fill Value to Write

    + * The third property is the fill value to write. The table below shows the values. By default, the + * data is filled with zeros. The application may choose no fill value (Undefined). In this case, + * uninitialized data may have random values. The application may define a fill value of an + * appropriate type. For more information, @see @ref subsec_datatype_fill. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Fill values to write
    What to WriteDescription
    DefaultBy default, the library fills allocated space with zeros.
    UndefinedAllocated space is filled with random values.
    User-definedThe application specifies the fill value.
    + * + * Together these three properties control the library’s behavior. The table below summarizes the + * possibilities during the dataset create-write-close cycle. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Storage allocation and fill summary
    When to allocate spaceWhen to write fill valueWhat fill value to writeLibrary create-write-close behavior
    EarlyNever-Library allocates space when dataset is created, but never writes a fill value to dataset. A read + * of unwritten data returns undefined values.
    LateNever-Library allocates space when dataset is written to, but never writes a fill value to the dataset. A + * read of unwritten data returns undefined values.
    IncrementalNever-Library allocates space when a dataset or chunk (whichever is the smallest unit of space) + * is written to, but it never writes a fill value to a dataset or a chunk. A read of unwritten data + * returns undefined values.
    -AllocationUndefinedError on creating the dataset. The dataset is not created.
    EarlyAllocationDefault or User-definedAllocate space for the dataset when the dataset is created. Write the fill value (default or + * user-defined) to the entire dataset when the dataset is created.
    LateAllocationDefault or User-definedAllocate space for the dataset when the application first writes data values to the dataset. + * Write the fill value to the entire dataset before writing application data values.
    IncrementalAllocationDefault or User-definedAllocate space for the dataset when the application first writes data values to the dataset or + * chunk (whichever is the smallest unit of space). Write the fill value to the entire dataset + * or chunk before writing application data values.
    + * + * During the #H5Dread function call, the library behavior depends on whether space has been + * allocated, whether the fill value has been written to storage, how the fill value is defined, and + * when to write the fill value. The table below summarizes the different behaviors. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    H5Dread summary
    Is space allocated in the file?What is the fill value?When to write the fill value?Library read behavior
    NoUndefinedanytimeError. Cannot create this dataset.
    NoDefault or User-definedanytimeFill the memory buffer with the fill value.
    YesUndefinedanytimeReturn data from storage (dataset). Trash is possible if the application has not written data + * to the portion of the dataset being read.
    YesDefault or User-definedNeverReturn data from storage (dataset). Trash is possible if the application has not written data + * to the portion of the dataset being read.
    YesDefault or User-defineAllocationReturn data from storage (dataset).
    + * + * There are two cases to consider depending on whether the space in the file has been allocated + * before the read or not. When space has not yet been allocated and if a fill value is defined, the + * memory buffer will be filled with the fill values and returned. In other words, no data has been + * read from the disk. If space has been allocated, the values are returned from the stored data. The + * unwritten elements will be filled according to the fill value. + * + * \subsubsection subsubsec_dataset_allocation_delete Deleting a Dataset from a File and Reclaiming Space + * HDF5 does not at this time provide an easy mechanism to remove a dataset from a file or to + * reclaim the storage space occupied by a deleted object. + * + * Removing a dataset and reclaiming the space it used can be done with the #H5Ldelete function + * and the h5repack utility program. With the H5Ldelete function, links to a dataset can be removed + * from the file structure. After all the links have been removed, the dataset becomes inaccessible to + * any application and is effectively removed from the file. The way to recover the space occupied + * by an unlinked dataset is to write all of the objects of the file into a new file. Any unlinked object + * is inaccessible to the application and will not be included in the new file. Writing objects to a + * new file can be done with a custom program or with the h5repack utility program. + * + * For more information, @see @ref sec_group + * + * \subsubsection subsubsec_dataset_allocation_release Releasing Memory Resources + * The system resources required for HDF5 objects such as datasets, datatypes, and dataspaces + * should be released once access to the object is no longer needed. This is accomplished via the + * appropriate close function. This is not unique to datasets but a general requirement when + * working with the HDF5 Library; failure to close objects will result in resource leaks. 
+ * + * In the case where a dataset is created or data has been transferred, there are several objects that + * must be closed. These objects include datasets, datatypes, dataspaces, and property lists. + * + * The application program must free any memory variables and buffers it allocates. When + * accessing data from the file, the amount of memory required can be determined by calculating + * the size of the memory datatype and the number of elements in the memory selection. + * + * Variable-length data are organized in two or more areas of memory. For more information, + * \see \ref h4_vlen_datatype "Variable-length Datatypes". + * + * When writing data, the application creates an array of + * vl_info_t which contains pointers to the elements. The elements might be, for example, strings. + * In the file, the variable-length data is stored in two parts: a heap with the variable-length values + * of the data elements and an array of vl_info_t elements. When the data is read, the amount of + * memory required for the heap can be determined with the #H5Dvlen_get_buf_size call. + * + * The data transfer property may be used to set a custom memory manager for allocating variable- + * length data for a #H5Dread. This is set with the #H5Pset_vlen_mem_manager call. + * To free the memory for variable-length data, it is necessary to visit each element, free the + * variable-length data, and reset the element. The application must free the memory it has + * allocated. For memory allocated by the HDF5 Library during a read, the #H5Dvlen_reclaim + * function can be used to perform this operation. + * + * \subsubsection subsubsec_dataset_allocation_ext External Storage Properties + * The external storage format allows data to be stored across a set of non-HDF5 files. A set of + * segments (offsets and sizes) in one or more files is defined as an external file list, or EFL, and + * the contiguous logical addresses of the data storage are mapped onto these segments. 
Currently, + * only the #H5D_CONTIGUOUS storage format allows external storage. External storage is + * enabled by a dataset creation property. The table below shows the API. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    External storage API
    FunctionDescription
    #H5Pset_externalThis function adds a new segment to the end of the external file list of the specified dataset + * creation property list. The segment begins a byte offset of file name and continues for size + * bytes. The space represented by this segment is adjacent to the space already represented by + * the external file list. The last segment in a file list may have the size #H5F_UNLIMITED, in + * which case the external file may be of unlimited size and no more files can be added to the + * external files list.
    #H5Pget_external_countCalling this function returns the number of segments in an external file list. If the dataset + * creation property list has no external data, then zero is returned.
    #H5Pget_externalThis is the counterpart for the #H5Pset_external function. Given a dataset creation + * property list and a zero-based index into that list, the file name, byte offset, and segment + * size are returned through non-null arguments. At most name_size characters are copied into + * the name argument which is not null terminated if the file name is longer than the + * supplied name buffer (this is similar to strncpy()).
    + * + * The figure below shows an example of how a contiguous, one-dimensional dataset is partitioned + * into three parts and each of those parts is stored in a segment of an external file. The top + * rectangle represents the logical address space of the dataset while the bottom rectangle represents + * an external file. + * + * + * + * + * + *
    + * \image html Dsets_fig11.gif "External file storage" + *
    + * + * The example below shows code that defines the external storage for the example. Note that the + * segments are defined in order of the logical addresses they represent, not their order within the + * external file. It would also have been possible to put the segments in separate files. Care should + * be taken when setting up segments in a single file since the library does not automatically check + * for segments that overlap. + * + * External storage + * \code + * plist = H5Pcreate (H5P_DATASET_CREATE); + * H5Pset_external (plist, "velocity.data", 3000, 1000); + * H5Pset_external (plist, "velocity.data", 0, 2500); + * H5Pset_external (plist, "velocity.data", 4500, 1500); + * \endcode + * + * The figure below shows an example of how a contiguous, two-dimensional dataset is partitioned + * into three parts and each of those parts is stored in a separate external file. The top rectangle + * represents the logical address space of the dataset while the bottom rectangles represent external + * files. + * + * + * + * + * + *
    + * \image html Dsets_fig12.gif "Partitioning a 2-D dataset for external storage" + *
    + * + * The example below shows code for the partitioning described above. In this example, the library + * maps the multi-dimensional array onto a linear address space as defined by the HDF5 format + * specification, and then maps that address space into the segments defined in the external file list. + * + * Partitioning a 2-D dataset for external storage + * \code + * plist = H5Pcreate (H5P_DATASET_CREATE); + * H5Pset_external (plist, "scan1.data", 0, 24); + * H5Pset_external (plist, "scan2.data", 0, 24); + * H5Pset_external (plist, "scan3.data", 0, 16); + * \endcode + * + * The segments of an external file can exist beyond the end of the (external) file. The library reads + * that part of a segment as zeros. When writing to a segment that exists beyond the end of a file, + * the external file is automatically extended. Using this feature, one can create a segment (or set of + * segments) which is larger than the current size of the dataset. This allows the dataset to be + * extended at a future time (provided the dataspace also allows the extension). + * + * All referenced external data files must exist before performing raw data I/O on the dataset. This + * is normally not a problem since those files are being managed directly by the application or + * indirectly through some other library. However, if the file is transferred from its original context, + * care must be taken to assure that all the external files are accessible in the new location. + * + * \subsection subsec_dataset_filters Using HDF5 Filters + * This section describes in detail how to use the n-bit, scale-offset filters and szip filters. + * + * \subsubsection subsubsec_dataset_filters_nbit Using the N‐bit Filter + * N-bit data has n significant bits, where n may not correspond to a precise number of bytes. On + * the other hand, computing systems and applications universally, or nearly so, run most efficiently + * when manipulating data as whole bytes or multiple bytes. 
+ * + * Consider the case of 12-bit integer data. In memory, that data will be handled in at least 2 bytes, + * or 16 bits, and on some platforms in 4 or even 8 bytes. The size of such a dataset can be + * significantly reduced when written to disk if the unused bits are stripped out. + * + * The n-bit filter is provided for this purpose, packing n-bit data on output by stripping off all + * unused bits and unpacking on input, restoring the extra bits required by the computational + * processor. + * + *

    N-bit Datatype

    + * An n-bit datatype is a datatype of n significant bits. Unless it is packed, an n-bit datatype is + * presented as an n-bit bitfield within a larger-sized value. For example, a 12-bit datatype might be + * presented as a 12-bit field in a 16-bit, or 2-byte, value. + * + * Currently, the datatype classes of n-bit datatype or n-bit field of a compound datatype or an array + * datatype are limited to integer or floating-point. + * + * The HDF5 user can create an n-bit datatype through a series of function calls. For example, the + * following calls create a 16-bit datatype that is stored in a 32-bit value with a 4-bit offset: + * \code + * hid_t nbit_datatype = H5Tcopy(H5T_STD_I32LE); + * H5Tset_precision(nbit_datatype, 16); + * H5Tset_offset(nbit_datatype, 4); + * \endcode + * + * In memory, one value of the above example n-bit datatype would be stored on a little-endian + * machine as follows: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    byte 3byte 2byte 1byte 0
    ????????????SPPPPPPPPPPPPPPP????
    + * Note: Key: S - sign bit, E - exponent bit, M - mantissa bit, ? - padding bit. Sign bit is + * included in signed integer datatype precision. + *
    + * + *

    N-bit Filter

    + * When data of an n-bit datatype is stored on disk using the n-bit filter, the filter packs the data by + * stripping off the padding bits; only the significant bits are retained and stored. The values on disk + * will appear as follows: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    1st value2nd valuenth value
    SPPPPPPP PPPPPPPPSPPPPPPP PPPPPPPP...
    + * Note: Key: S - sign bit, E - exponent bit, M - mantissa bit, ? - padding bit. Sign bit is + * included in signed integer datatype precision. + *
    + * + *

    How Does the N-bit Filter Work?

    + * The n-bit filter always compresses and decompresses according to dataset properties supplied by + * the HDF5 library in the datatype, dataspace, or dataset creation property list. + * + * The dataset datatype refers to how data is stored in an HDF5 file while the memory datatype + * refers to how data is stored in memory. The HDF5 library will do datatype conversion when + * writing data in memory to the dataset or reading data from the dataset to memory if the memory + * datatype differs from the dataset datatype. Datatype conversion is performed by HDF5 library + * before n-bit compression and after n-bit decompression. + * + * The following sub-sections examine the common cases: + * \li N-bit integer conversions + * \li N-bit floating-point conversions + * + *

    N-bit Integer Conversions

    + * Integer data with a dataset of integer datatype of less than full precision and a memory datatype + * of #H5T_NATIVE_INT, provides the simplest application of the n-bit filter. + * + * The precision of #H5T_NATIVE_INT is 8 multiplied by sizeof(int). This value, the size of an + * int in bytes, differs from platform to platform; we assume a value of 4 for the following + * illustration. We further assume the memory byte order to be little-endian. + * + * In memory, therefore, the precision of #H5T_NATIVE_INT is 32 and the offset is 0. One value of + * #H5T_NATIVE_INT is laid out in memory as follows: + * + * + * + * + *
    + * \image html Dsets_NbitInteger1.gif "H5T_NATIVE_INT in memory"
    + * Note: Key: S - sign bit, E - exponent bit, M - mantissa bit, ? - padding bit. Sign bit is + * included in signed integer datatype precision. + *
    + * + * Suppose the dataset datatype has a precision of 16 and an offset of 4. After HDF5 converts + * values from the memory datatype to the dataset datatype, it passes something like the following + * to the n-bit filter for compression: + * + * + * + * + *
    + * \image html Dsets_NbitInteger2.gif "Passed to the n-bit filter"
    + * Note: Key: S - sign bit, E - exponent bit, M - mantissa bit, ? - padding bit. Sign bit is + * included in signed integer datatype precision. + *
    + * + * Notice that only the specified 16 bits (15 significant bits and the sign bit) are retained in the + * conversion. All other significant bits of the memory datatype are discarded because the dataset + * datatype calls for only 16 bits of precision. After n-bit compression, none of these discarded bits, + * known as padding bits will be stored on disk. + * + *

    N-bit Floating-point Conversions

    + * Things get more complicated in the case of a floating-point dataset datatype class. This sub- + * section provides an example that illustrates the conversion from a memory datatype of + * #H5T_NATIVE_FLOAT to a dataset datatype of class floating-point. + * + * As before, let the #H5T_NATIVE_FLOAT be 4 bytes long, and let the memory byte order be + * little-endian. Per the IEEE standard, one value of #H5T_NATIVE_FLOAT is laid out in memory + * as follows: + * + * + * + * + *
    + * \image html Dsets_NbitFloating1.gif "H5T_NATIVE_FLOAT in memory"
    + * Note: Key: S - sign bit, E - exponent bit, M - mantissa bit, ? - padding bit. Sign bit is + * included in floating-point datatype precision. + *
    + * + * Suppose the dataset datatype has a precision of 20, offset of 7, mantissa size of 13, mantissa + * position of 7, exponent size of 6, exponent position of 20, and sign position of 26. For more + * information, @see @ref subsubsec_datatype_program_define. + * + * After HDF5 converts values from the memory datatype to the dataset datatype, it passes + * something like the following to the n-bit filter for compression: + * + * + * + * + *
    + * \image html Dsets_NbitFloating2.gif "Passed to the n-bit filter"
    + * Note: Key: S - sign bit, E - exponent bit, M - mantissa bit, ? - padding bit. Sign bit is + * included in floating-point datatype precision. + *
    + * + * The sign bit and truncated mantissa bits are not changed during datatype conversion by the + * HDF5 library. On the other hand, the conversion of the 8-bit exponent to a 6-bit exponent is a + * little tricky: + * + * The bias for the new exponent in the n-bit datatype is: + * + * 2<sup>(n-1)</sup>-1 + * + * + * The following formula is used for this exponent conversion:
    + * + * exp8 - (2<sup>(8-1)</sup>-1) = exp6 - (2<sup>(6-1)</sup>-1) = actual exponent value + * 
    + * where exp8 is the stored decimal value as represented by the 8-bit exponent, and exp6 is the + * stored decimal value as represented by the 6-bit exponent. + * + * In this example, caution must be taken to ensure that, after conversion, the actual exponent value + * is within the range that can be represented by a 6-bit exponent. For example, an 8-bit exponent + * can represent values from -127 to 128 while a 6-bit exponent can represent values only from -31 + * to 32. + * + *

    N-bit Filter Behavior

    + * The n-bit filter was designed to treat the incoming data byte by byte at the lowest level. The + * purpose was to make the n-bit filter as generic as possible so that no pointer cast related to the + * datatype is needed. + * + * Bitwise operations are employed for packing and unpacking at the byte level. + * + * Recursive function calls are used to treat compound and array datatypes. + * + *

    N-bit Compression

    + * The main idea of n-bit compression is to use a loop to compress each data element in a chunk. + * Depending on the datatype of each element, the n-bit filter will call one of four functions. Each + * of these functions performs one of the following tasks: + * \li Compress a data element of a no-op datatype + * \li Compress a data element of an atomic datatype + * \li Compress a data element of a compound datatype + * \li Compress a data element of an array datatype + * + * No-op datatypes: The n-bit filter does not actually compress no-op datatypes. Rather, it copies + * the data buffer of the no-op datatype from the non-compressed buffer to the proper location in + * the compressed buffer; the compressed buffer has no holes. The term “compress” is used here + * simply to distinguish this function from the function that performs the reverse operation during + * decompression. + * + * Atomic datatypes: The n-bit filter will find the bytes where significant bits are located and try to + * compress these bytes, one byte at a time, using a loop. At this level, the filter needs the following + * information: + *
    • The byte offset of the beginning of the current data element with respect to the + * beginning of the input data buffer
    • + *
    • Datatype size, precision, offset, and byte order
    + * + * The n-bit filter compresses from the most significant byte containing significant bits to the least + * significant byte. For big-endian data, therefore, the loop index progresses from smaller to larger + * while for little-endian, the loop index progresses from larger to smaller. + * + * In the extreme case of when the n-bit datatype has full precision, this function copies the content + * of the entire non-compressed datatype to the compressed output buffer. + * + * Compound datatypes: The n-bit filter will compress each data member of the compound + * datatype. If the member datatype is of an integer or floating-point datatype, the n-bit filter will + * call the function described above. If the member datatype is of a no-op datatype, the filter will + * call the function described above. If the member datatype is of a compound datatype, the filter + * will make a recursive call to itself. If the member datatype is of an array datatype, the filter will + * call the function described below. + * + * Array datatypes: The n-bit filter will use a loop to compress each array element in the array. If + * the base datatype of array element is of an integer or floating-point datatype, the n-bit filter will + * call the function described above. If the base datatype is of a no-op datatype, the filter will call + * the function described above. If the base datatype is of a compound datatype, the filter will call + * the function described above. If the member datatype is of an array datatype, the filter will make + * a recursive call of itself. + * + *

    N-bit Decompression

    + * The n-bit decompression algorithm is very similar to n-bit compression. The only difference is + * that at the byte level, compression packs out all padding bits and stores only significant bits into + * a continuous buffer (unsigned char) while decompression unpacks significant bits and inserts + * padding bits (zeros) at the proper positions to recover the data bytes as they existed before + * compression. + * + *

    Storing N-bit Parameters to Array cd_values[]

    + * All of the information, or parameters, required by the n-bit filter are gathered and stored in the + * array cd_values[] by the private function H5Z__set_local_nbit and are passed to another private + * function, H5Z__filter_nbit, by the HDF5 Library. + * These parameters are as follows: + * \li Parameters related to the datatype + * \li The number of elements within the chunk + * \li A flag indicating whether compression is needed + * + * The first and second parameters can be obtained using the HDF5 dataspace and datatype + * interface calls. + * + * A compound datatype can have members of array or compound datatype. An array datatype’s + * base datatype can be a complex compound datatype. Recursive calls are required to set + * parameters for these complex situations. + * + * Before setting the parameters, the number of parameters should be calculated to dynamically + * allocate the array cd_values[], which will be passed to the HDF5 Library. This also requires + * recursive calls. + * + * For an atomic datatype (integer or floating-point), parameters that will be stored include the + * datatype’s size, endianness, precision, and offset. + * + * For a no-op datatype, only the size is required. + * + * For a compound datatype, parameters that will be stored include the datatype’s total size and + * number of members. For each member, its member offset needs to be stored. Other parameters + * for members will depend on the respective datatype class. + * + * For an array datatype, the total size parameter should be stored. Other parameters for the array’s + * base type depend on the base type’s datatype class. + * + * Further, to correctly retrieve the parameter for use of n-bit compression or decompression later, + * parameters for distinguishing between datatype classes should be stored. + * + *

    Implementation

    + * Three filter callback functions were written for the n-bit filter: + * \li H5Z__can_apply_nbit + * \li H5Z__set_local_nbit + * \li H5Z__filter_nbit + * + * These functions are called internally by the HDF5 library. A number of utility functions were + * written for the function H5Z__set_local_nbit. Compression and decompression functions were + * written and are called by function H5Z__filter_nbit. All these functions are included in the file + * H5Znbit.c. + * + * The public function #H5Pset_nbit is called by the application to set up the use of the n-bit filter. + * This function is included in the file H5Pdcpl.c. The application does not need to supply any + * parameters. + * + *

    How N-bit Parameters are Stored

    + * A scheme of storing parameters required by the n-bit filter in the array cd_values[] was + * developed utilizing recursive function calls. + * + * Four private utility functions were written for storing the parameters associated with atomic + * (integer or floating-point), no-op, array, and compound datatypes: + * \li H5Z__set_parms_atomic + * \li H5Z__set_parms_array + * \li H5Z__set_parms_nooptype + * \li H5Z__set_parms_compound + * + * The scheme is briefly described below. + * + * First, assign a numeric code for datatype class atomic (integer or float), no-op, array, and + * compound datatype. The code is stored before other datatype related parameters are stored. + * + * The first three parameters of cd_values[] are reserved for: + * \li 1. The number of valid entries in the array cd_values[] + * \li 2. A flag indicating whether compression is needed + * \li 3. The number of elements in the chunk + * + * Throughout the balance of this explanation, i represents the index of cd_values[]. + * In the function H5Z__set_local_nbit: + *
    • 1. i = 2
    • + *
    • 2. Get the number of elements in the chunk and store in cd_value[i]; increment i
    • + *
    • 3. Get the class of the datatype: + *
      • For an integer or floating-point datatype, call H5Z__set_parms_atomic
      • + *
      • For an array datatype, call H5Z__set_parms_array
      • + *
      • For a compound datatype, call H5Z__set_parms_compound
      • + *
      • For none of the above, call H5Z__set_parms_nooptype
    • + *
    • 4. Store i in cd_value[0] and flag in cd_values[1]
    + * + * In the function H5Z__set_parms_atomic: + * \li 1. Store the assigned numeric code for the atomic datatype in cd_value[i]; increment i + * \li 2. Get the size of the atomic datatype and store in cd_value[i]; increment i + * \li 3. Get the order of the atomic datatype and store in cd_value[i]; increment i + * \li 4. Get the precision of the atomic datatype and store in cd_value[i]; increment i + * \li 5. Get the offset of the atomic datatype and store in cd_value[i]; increment i + * \li 6. Determine the need to do compression at this point + * + * In the function H5Z__set_parms_nooptype: + * \li 1. Store the assigned numeric code for the no-op datatype in cd_value[i]; increment i + * \li 2. Get the size of the no-op datatype and store in cd_value[i]; increment i + * + * In the function H5Z__set_parms_array: + *
    • 1. Store the assigned numeric code for the array datatype in cd_value[i]; increment i
    • + *
    • 2. Get the size of the array datatype and store in cd_value[i]; increment i
    • + *
    • 3. Get the class of the array’s base datatype. + *
      • For an integer or floating-point datatype, call H5Z__set_parms_atomic
      • + *
      • For an array datatype, call H5Z__set_parms_array
      • + *
      • For a compound datatype, call H5Z__set_parms_compound
      • + *
      • If none of the above, call H5Z__set_parms_nooptype
    + * + * In the function H5Z__set_parms_compound: + *
    • 1. Store the assigned numeric code for the compound datatype in cd_value[i]; increment i
    • + *
    • 2. Get the size of the compound datatype and store in cd_value[i]; increment i
    • + *
    • 3. Get the number of members and store in cd_values[i]; increment i
    • + *
    • 4. For each member + *
      • Get the member offset and store in cd_values[i]; increment i
      • + *
      • Get the class of the member datatype
      • + *
      • For an integer or floating-point datatype, call H5Z__set_parms_atomic
      • + *
      • For an array datatype, call H5Z__set_parms_array
      • + *
      • For a compound datatype, call H5Z__set_parms_compound
      • + *
      • If none of the above, call H5Z__set_parms_nooptype
    + * + *

    N-bit Compression and Decompression Functions

    + * The n-bit compression and decompression functions above are called by the private HDF5 + * function H5Z__filter_nbit. The compress and decompress functions retrieve the n-bit parameters + * from cd_values[] as it was passed by H5Z__filter_nbit. Parameters are retrieved in exactly the + * same order in which they are stored and lower-level compression and decompression functions + * for different datatype classes are called. + * + * N-bit compression is not implemented in place. Due to the difficulty of calculating actual output + * buffer size after compression, the same space as that of the input buffer is allocated for the output + * buffer as passed to the compression function. However, the size of the output buffer passed by + * reference to the compression function will be changed (smaller) after the compression is + * complete. + * + *

    Usage Examples

    + * + * The following code example illustrates the use of the n-bit filter for writing and reading n-bit + * integer data. + * + * N-bit compression for integer data + * \code + * #include "hdf5.h" + * #include "stdlib.h" + * #include "math.h" + * + * #define H5FILE_NAME "nbit_test_int.h5" + * #define DATASET_NAME "nbit_int" + * #define NX 200 + * #define NY 300 + * #define CH_NX 10 + * #define CH_NY 15 + * + * int main(void) + * { + * hid_t file, dataspace, dataset, datatype, mem_datatype, dset_create_props; + * hsize_t dims[2], chunk_size[2]; + * int orig_data[NX][NY]; + * int new_data[NX][NY]; + * int i, j; + * size_t precision, offset; + * + * // Define dataset datatype (integer), and set precision, offset + * datatype = H5Tcopy(H5T_NATIVE_INT); + * precision = 17; // precision includes sign bit + * if(H5Tset_precision(datatype,precision) < 0) { + * printf("Error: fail to set precision\n"); + * return -1; + * } + * offset = 4; + * if(H5Tset_offset(datatype,offset) < 0) { + * printf("Error: fail to set offset\n"); + * return -1; + * } + * + * // Copy to memory datatype + * mem_datatype = H5Tcopy(datatype); + * + * // Set order of dataset datatype + * if(H5Tset_order(datatype, H5T_ORDER_BE) < 0) { + * printf("Error: fail to set endianness\n"); + * return -1; + * } + * + * // Initialize data buffer with random data within correct + * // range corresponding to the memory datatype's precision + * // and offset. + * for (i = 0; i < NX; i++) + * for (j = 0; j < NY; j++) + * orig_data[i][j] = rand() % (int)pow(2, precision-1) << offset; + * + * // Describe the size of the array. + * dims[0] = NX; + * dims[1] = NY; + * if((dataspace = H5Screate_simple (2, dims, NULL)) < 0) { + * printf("Error: fail to create dataspace\n"); + * return -1; + * } + * + * // Create a new file using read/write access, default file + * // creation properties, and default file access properties. 
+ * if((file = H5Fcreate (H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create file\n"); + * return -1; + * } + * + * // Set the dataset creation property list to specify that + * // the raw data is to be partitioned into 10 x 15 element + * // chunks and that each chunk is to be compressed. + * chunk_size[0] = CH_NX; + * chunk_size[1] = CH_NY; + * if((dset_create_props = H5Pcreate (H5P_DATASET_CREATE)) < 0) { + * printf("Error: fail to create dataset property\n"); + * return -1; + * } + * if(H5Pset_chunk (dset_create_props, 2, chunk_size) < 0) { + * printf("Error: fail to set chunk\n"); + * return -1; + * } + * + * // Set parameters for n-bit compression; check the description + * // of the H5Pset_nbit function in the HDF5 Reference Manual + * // for more information. + * if(H5Pset_nbit (dset_create_props) < 0) { + * printf("Error: fail to set nbit filter\n"); + * return -1; + * } + * + * // Create a new dataset within the file. The datatype + * // and dataspace describe the data on disk, which may + * // be different from the format used in the application's + * // memory. + * if((dataset = H5Dcreate(file, DATASET_NAME, datatype, dataspace, + * H5P_DEFAULT, dset_create_props, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create dataset\n"); + * return -1; + * } + * + * // Write the array to the file. The datatype and dataspace + * // describe the format of the data in the 'orig_data' buffer. + * // The raw data is translated to the format required on disk, + * // as defined above. We use default raw data transfer + * // properties. + * if(H5Dwrite (dataset, mem_datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT, orig_data) < 0) { + * printf("Error: fail to write to dataset\n"); + * return -1; + * } + * H5Dclose (dataset); + * + * if((dataset = H5Dopen(file, DATASET_NAME, H5P_DEFAULT)) < 0) { + * printf("Error: fail to open dataset\n"); + * return -1; + * } + * + * // Read the array. 
This is similar to writing data, + * // except the data flows in the opposite direction. + * // Note: Decompression is automatic. + * if(H5Dread (dataset, mem_datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT, new_data) < 0) { + * printf("Error: fail to read from dataset\n"); + * return -1; + * } + * + * H5Tclose (datatype); + * H5Tclose (mem_datatype); + * H5Dclose (dataset); + * H5Sclose (dataspace); + * H5Pclose (dset_create_props); + * H5Fclose (file); + * + * return 0; + * } + * \endcode + * + * The following code example illustrates the use of the n-bit filter for writing and reading n-bit + * floating-point data. + * + * N-bit compression for floating-point data + * \code + * #include "hdf5.h" + * + * #define H5FILE_NAME "nbit_test_float.h5" + * #define DATASET_NAME "nbit_float" + * #define NX 2 + * #define NY 5 + * #define CH_NX 2 + * #define CH_NY 5 + * + * int main(void) + * { + * hid_t file, dataspace, dataset, datatype, dset_create_props; + * hsize_t dims[2], chunk_size[2]; + * + * // orig_data[] are initialized to be within the range that + * // can be represented by dataset datatype (no precision + * // loss during datatype conversion) + * // + * float orig_data[NX][NY] = {{188384.00, 19.103516,-1.0831790e9, -84.242188, 5.2045898}, + * {-49140.000, 2350.2500, -3.2110596e-1, 6.4998865e-5, -0.0000000}}; + * float new_data[NX][NY]; + * size_t precision, offset; + * + * // Define single-precision floating-point type for dataset + * //--------------------------------------------------------------- + * // size=4 byte, precision=20 bits, offset=7 bits, + * // mantissa size=13 bits, mantissa position=7, + * // exponent size=6 bits, exponent position=20, + * // exponent bias=31. + * // It can be illustrated in little-endian order as: + * // (S - sign bit, E - exponent bit, M - mantissa bit, ? - padding bit) + * // + * // 3 2 1 0 + * // ?????SEE EEEEMMMM MMMMMMMM M??????? 
+ * // + * // To create a new floating-point type, the following + * // properties must be set in the order of + * // set fields -> set offset -> set precision -> set size. + * // All these properties must be set before the type can + * // function. Other properties can be set anytime. Derived + * // type size cannot be expanded bigger than original size + * // but can be decreased. There should be no holes + * // among the significant bits. Exponent bias usually + * // is set 2^(n-1)-1, where n is the exponent size. + * //--------------------------------------------------------------- + * datatype = H5Tcopy(H5T_IEEE_F32BE); + * if(H5Tset_fields(datatype, 26, 20, 6, 7, 13) < 0) { + * printf("Error: fail to set fields\n"); + * return -1; + * } + * offset = 7; + * if(H5Tset_offset(datatype,offset) < 0) { + * printf("Error: fail to set offset\n"); + * return -1; + * } + * precision = 20; + * if(H5Tset_precision(datatype,precision) < 0) { + * printf("Error: fail to set precision\n"); + * return -1; + * } + * if(H5Tset_size(datatype, 4) < 0) { + * printf("Error: fail to set size\n"); + * return -1; + * } + * if(H5Tset_ebias(datatype, 31) < 0) { + * printf("Error: fail to set exponent bias\n"); + * return -1; + * } + * + * // Describe the size of the array. + * dims[0] = NX; + * dims[1] = NY; + * if((dataspace = H5Screate_simple (2, dims, NULL)) < 0) { + * printf("Error: fail to create dataspace\n"); + * return -1; + * } + * + * // Create a new file using read/write access, default file + * // creation properties, and default file access properties. + * if((file = H5Fcreate (H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create file\n"); + * return -1; + * } + * + * // Set the dataset creation property list to specify that + * // the raw data is to be partitioned into 2 x 5 element + * // chunks and that each chunk is to be compressed. 
+ * chunk_size[0] = CH_NX; + * chunk_size[1] = CH_NY; + * if((dset_create_props = H5Pcreate (H5P_DATASET_CREATE)) < 0) { + * printf("Error: fail to create dataset property\n"); + * return -1; + * } + * if(H5Pset_chunk (dset_create_props, 2, chunk_size) < 0) { + * printf("Error: fail to set chunk\n"); + * return -1; + * } + * + * // Set parameters for n-bit compression; check the description + * // of the H5Pset_nbit function in the HDF5 Reference Manual + * // for more information. + * if(H5Pset_nbit (dset_create_props) < 0) { + * printf("Error: fail to set nbit filter\n"); + * return -1; + * } + * + * // Create a new dataset within the file. The datatype + * // and dataspace describe the data on disk, which may + * // be different from the format used in the application's memory. + * if((dataset = H5Dcreate(file, DATASET_NAME, datatype, dataspace, H5P_DEFAULT, + * dset_create_props, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create dataset\n"); + * return -1; + * } + * + * // Write the array to the file. The datatype and dataspace + * // describe the format of the data in the 'orig_data' buffer. + * // The raw data is translated to the format required on disk, + * // as defined above. We use default raw data transfer properties. + * if(H5Dwrite (dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, orig_data) < 0) { + * printf("Error: fail to write to dataset\n"); + * return -1; + * } + * H5Dclose (dataset); + * if((dataset = H5Dopen(file, DATASET_NAME, H5P_DEFAULT))<0) { + * printf("Error: fail to open dataset\n"); + * return -1; + * } + * + * // Read the array. This is similar to writing data, + * // except the data flows in the opposite direction. + * // Note: Decompression is automatic. 
+ * if(H5Dread (dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, new_data) < 0) { + * printf("Error: fail to read from dataset\n"); + * return -1; + * } + * H5Tclose (datatype); + * H5Dclose (dataset); + * H5Sclose (dataspace); + * H5Pclose (dset_create_props); + * H5Fclose (file); + * + * return 0; + * } + * \endcode + * + * 

    Limitations

    + * Because the array cd_values[] has to fit into an object header message of 64K, the n-bit filter has + * an upper limit on the number of n-bit parameters that can be stored in it. To be conservative, a + * maximum of 4K is allowed for the number of parameters. + * + * The n-bit filter currently only compresses n-bit datatypes or fields derived from integer or + * floating-point datatypes. The n-bit filter assumes padding bits of zero. This may not be true since + * the HDF5 user can set padding bit to be zero, one, or leave the background alone. However, it is + * expected the n-bit filter will be modified to adjust to such situations. + * + * The n-bit filter does not have a way to handle the situation where the fill value of a dataset is + * defined and the fill value is not of an n-bit datatype although the dataset datatype is. + * + * \subsubsection subsubsec_dataset_filters_scale Using the Scale‐offset Filter + * Generally speaking, scale-offset compression performs a scale and/or offset operation on each + * data value and truncates the resulting value to a minimum number of bits (minimum-bits) before + * storing it. + * + * The current scale-offset filter supports integer and floating-point datatypes only. For the floating- + * point datatype, float and double are supported, but long double is not supported. + * + * Integer data compression uses a straight-forward algorithm. Floating-point data compression + * adopts the GRiB data packing mechanism which offers two alternate methods: a fixed minimum- + * bits method, and a variable minimum-bits method. Currently, only the variable minimum-bits + * method is implemented. + * + * Like other I/O filters supported by the HDF5 library, applications using the scale-offset filter + * must store data with chunked storage. + * + * Integer type: The minimum-bits of integer data can be determined by the filter. For example, if + * the maximum value of data to be compressed is 7065 and the minimum value is 2970. 
Then the + * “span” of dataset values is equal to (max-min+1), which is 4676. If no fill value is defined for the + * dataset, the minimum-bits is: ceiling(log2(span)) = 12. With fill value set, the minimum-bits is: + * ceiling(log2(span+1)) = 13. + * + * HDF5 users can also set the minimum-bits. However, if the user gives a minimum-bits that is + * less than that calculated by the filter, the compression will be lossy. + * + * Floating-point type: The basic idea of the scale-offset filter for the floating-point type is to + * transform the data by some kind of scaling to integer data, and then to follow the procedure of + * the scale-offset filter for the integer type to do the data compression. Due to the data + * transformation from floating-point to integer, the scale-offset filter is lossy in nature. + * + * Two methods of scaling the floating-point data are used: the so-called D-scaling and E-scaling. + * D-scaling is more straightforward and easy to understand. For HDF5 1.8 release, only the + * D-scaling method had been implemented. + * + *

    Design

    + * Before the filter does any real work, it needs to gather some information from the HDF5 Library + * through API calls. The parameters the filter needs are: + * \li The minimum-bits of the data value + * \li The number of data elements in the chunk + * \li The datatype class, size, sign (only for integer type), byte order, and fill value if defined + * + * Size and sign are needed to determine what kind of pointer cast to use when retrieving values + * from the data buffer. + * + * The pipeline of the filter can be divided into four parts: (1)pre-compression; (2)compression; + * (3)decompression; (4)post-decompression. + * + * Depending on whether a fill value is defined or not, the filter will handle pre-compression and + * post-decompression differently. + * + * The scale-offset filter only needs the memory byte order, size of datatype, and minimum-bits for + * compression and decompression. + * + * Since decompression has no access to the original data, the minimum-bits and the minimum + * value need to be stored with the compressed data for decompression and post-decompression. + * + *

    Integer Type

    + * Pre-compression: During pre-compression minimum-bits is calculated if it is not set by the user. + * For more information on how minimum-bits are calculated, @see @ref subsubsec_dataset_filters_nbit. + * + * If the fill value is defined, finding the maximum and minimum values should ignore the data + * element whose value is equal to the fill value. + * + * If no fill value is defined, the value of each data element is subtracted by the minimum value + * during this stage. + * + * If the fill value is defined, the fill value is assigned to the maximum value. In this way minimum- + * bits can represent a data element whose value is equal to the fill value and subtracts the + * minimum value from a data element whose value is not equal to the fill value. + * + * The fill value (if defined), the number of elements in a chunk, the class of the datatype, the size + * of the datatype, the memory order of the datatype, and other similar elements will be stored in + * the HDF5 object header for the post-decompression usage. + * + * After pre-compression, all values are non-negative and are within the range that can be stored by + * minimum-bits. + * + * Compression: All modified data values after pre-compression are packed together into the + * compressed data buffer. The number of bits for each data value decreases from the number of + * bits of integer (32 for most platforms) to minimum-bits. The value of minimum-bits and the + * minimum value are added to the data buffer and the whole buffer is sent back to the library. In + * this way, the number of bits for each modified value is no more than the size of minimum-bits. + * + * Decompression: In this stage, the number of bits for each data value is resumed from minimum- + * bits to the number of bits of integer. 
+ * + * Post-decompression: For the post-decompression stage, the filter does the opposite of what it + * does during pre-compression except that it does not calculate the minimum-bits or the minimum + * value. These values were saved during compression and can be retrieved through the resumed + * data buffer. If no fill value is defined, the filter adds the minimum value back to each data + * element. + * + * If the fill value is defined, the filter assigns the fill value to the data element whose value is equal + * to the maximum value that minimum-bits can represent and adds the minimum value back to + * each data element whose value is not equal to the maximum value that minimum-bits can + * represent. + * + * @anchor h4_float_datatype

    Floating-point Type

    + * The filter will do data transformation from floating-point type to integer type and then handle the + * data by using the procedure for handling the integer data inside the filter. Insignificant bits of + * floating-point data will be cut off during data transformation, so this filter is a lossy compression + * method. + * + * There are two scaling methods: D-scaling and E-scaling. The HDF5 1.8 release only supports D- + * scaling. D-scaling is short for decimal scaling. E-scaling should be similar conceptually. In order + * to transform data from floating-point to integer, a scale factor is introduced. The minimum value + * will be calculated. Each data element value will subtract the minimum value. The modified data + * will be multiplied by 10 (Decimal) to the power of scale_factor, and only the integer part will be + * kept and manipulated through the routines for the integer type of the filter during pre- + * compression and compression. Integer data will be divided by 10 to the power of scale_factor to + * transform back to floating-point data during decompression and post-decompression. Each data + * element value will then add the minimum value, and the floating-point data are resumed. + * However, the resumed data will lose some insignificant bits compared with the original value. + * + * For example, the following floating-point data are manipulated by the filter, and the D-scaling + * factor is 2. + * {104.561, 99.459, 100.545, 105.644} + * + * The minimum value is 99.459, each data element subtracts 99.459, the modified data is + * {5.102, 0, 1.086, 6.185} + * + * Since the D-scaling factor is 2, all floating-point data will be multiplied by 10^2 with this result: + * {510.2, 0, 108.6, 618.5} + * + * The digit after decimal point will be rounded off, and then the set looks like: + * {510, 0, 109, 619} + * + * After decompression, each value will be divided by 10^2 and will be added to the offset 99.459. 
+ * The floating-point data becomes + * {104.559, 99.459, 100.549, 105.649} + * + * The error for each value should be no more than 5 * 10^(-(D-scaling factor + 1)). + * D-scaling is sometimes also referred to as a variable minimum-bits method since for different datasets + * the minimum-bits to represent the same decimal precision will vary. For E-scaling, the data value is + * scaled by 2 to the power of scale_factor. E-scaling is also called the fixed minimum-bits method since for + * different datasets the minimum-bits will always be fixed to the scale factor of E-scaling. + * Currently, HDF5 ONLY supports the D-scaling (variable minimum-bits) method. + * + *

    Implementation

    + * The scale-offset filter implementation was written and included in the file H5Zscaleoffset.c. + * Function #H5Pset_scaleoffset was written and included in the file “H5Pdcpl.c”. The HDF5 user + * can supply minimum-bits by calling function #H5Pset_scaleoffset. + * + * The scale-offset filter was implemented based on the design outlined in this section. However, + * the following factors need to be considered: + *
    1. + * The filter needs the appropriate cast pointer whenever it needs to retrieve data values. + *
    2. + *
    3. + * The HDF5 Library passes to the filter the to-be-compressed data in the format of the dataset + * datatype, and the filter passes back the decompressed data in the same format. If a fill value is + * defined, it is also in dataset datatype format. For example, if the byte order of the dataset data- + * type is different from that of the memory datatype of the platform, compression or decompression performs + * an endianness conversion of data buffer. Moreover, it should be aware that + * memory byte order can be different during compression and decompression. + *
    4. + *
    5. + * The difference of endianness and datatype between file and memory should be considered + * when saving and retrieval of minimum-bits, minimum value, and fill value. + *
    6. + * If the user sets the minimum-bits to full precision of the datatype, no operation is needed at + * the filter side. If the full precision is a result of calculation by the filter, then the minimum-bits + * needs to be saved for decompression but no compression or decompression is needed (only a + * copy of the input buffer is needed).
    7. + *
    8. + * If by calculation of the filter, the minimum-bits is equal to zero, special handling is needed. + * Since it means all values are the same, no compression or decompression is needed. But the + * minimum-bits and minimum value still need to be saved during compression.
    9. + *
    10. + * For floating-point data, the minimum value of the dataset should be calculated at first. Each + * data element value will then subtract the minimum value to obtain the “offset” data. The offset + * data will then follow the steps outlined above in the discussion of floating-point types to do data + * transformation to integer and rounding. For more information, @see @ref h4_float_datatype. + *
    + * + *

    Usage Examples

    + * The following code example illustrates the use of the scale-offset filter for writing and reading + * integer data. + * + * Scale-offset compression integer data + * \code + * #include "hdf5.h" + * #include "stdlib.h" + * + * #define H5FILE_NAME "scaleoffset_test_int.h5" + * #define DATASET_NAME "scaleoffset_int" + * #define NX 200 + * #define NY 300 + * #define CH_NX 10 + * #define CH_NY 15 + * int main(void) + * { + * hid_t file, dataspace, dataset, datatype, dset_create_props; + * hsize_t dims[2], chunk_size[2]; + * int orig_data[NX][NY]; + * int new_data[NX][NY]; + * int i, j, fill_val; + * + * // Define dataset datatype + * datatype = H5Tcopy(H5T_NATIVE_INT); + * + * // Initialize data buffer + * for (i=0; i < NX; i++) + * for (j=0; j < NY; j++) + * orig_data[i][j] = rand() % 10000; + * + * // Describe the size of the array. + * dims[0] = NX; + * dims[1] = NY; + * if((dataspace = H5Screate_simple (2, dims, NULL)) < 0) { + * printf("Error: fail to create dataspace\n"); + * return -1; + * } + * + * // Create a new file using read/write access, default file + * // creation properties, and default file access properties. + * if((file = H5Fcreate (H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create file\n"); + * return -1; + * } + * + * // Set the dataset creation property list to specify that + * // the raw data is to be partitioned into 10 x 15 element + * // chunks and that each chunk is to be compressed. 
+ * chunk_size[0] = CH_NX; + * chunk_size[1] = CH_NY; + * if((dset_create_props = H5Pcreate (H5P_DATASET_CREATE)) < 0) { + * printf("Error: fail to create dataset property\n"); + * return -1; + * } + * if(H5Pset_chunk (dset_create_props, 2, chunk_size) < 0) { + * printf("Error: fail to set chunk\n"); + * return -1; + * } + * + * // Set the fill value of dataset + * fill_val = 10000; + * if (H5Pset_fill_value(dset_create_props, H5T_NATIVE_INT, &fill_val)<0) { + * printf("Error: can not set fill value for dataset\n"); + * return -1; + * } + * + * // Set parameters for scale-offset compression. Check the + * // description of the H5Pset_scaleoffset function in the + * // HDF5 Reference Manual for more information. + * if(H5Pset_scaleoffset (dset_create_props, H5Z_SO_INT, H5Z_SO_INT_MINIMUMBITS_DEFAULT) < 0) { + * printf("Error: fail to set scaleoffset filter\n"); + * return -1; + * } + * + * // Create a new dataset within the file. The datatype + * // and dataspace describe the data on disk, which may + * // or may not be different from the format used in the + * // application's memory. The link creation and + * // dataset access property list parameters are passed + * // with default values. + * if((dataset = H5Dcreate (file, DATASET_NAME, datatype, dataspace, H5P_DEFAULT, + * dset_create_props, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create dataset\n"); + * return -1; + * } + * + * // Write the array to the file. The datatype and dataspace + * // describe the format of the data in the 'orig_data' buffer. + * // We use default raw data transfer properties. + * if(H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, orig_data) < 0) { + * printf("Error: fail to write to dataset\n"); + * return -1; + * } + * + * H5Dclose (dataset); + * + * if((dataset = H5Dopen(file, DATASET_NAME, H5P_DEFAULT)) < 0) { + * printf("Error: fail to open dataset\n"); + * return -1; + * } + * + * // Read the array. 
This is similar to writing data, + * // except the data flows in the opposite direction. + * // Note: Decompression is automatic. + * if(H5Dread (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, new_data) < 0) { + * printf("Error: fail to read from dataset\n"); + * return -1; + * } + * + * H5Tclose (datatype); + * H5Dclose (dataset); + * H5Sclose (dataspace); + * H5Pclose (dset_create_props); + * H5Fclose (file); + * + * return 0; + * } + * \endcode + * + * The following code example illustrates the use of the scale-offset filter (set for variable + * minimum-bits method) for writing and reading floating-point data. + * + * Scale-offset compression floating-point data + * \code + * #include "hdf5.h" + * #include "stdlib.h" + * + * #define H5FILE_NAME "scaleoffset_test_float_Dscale.h5" + * #define DATASET_NAME "scaleoffset_float_Dscale" + * #define NX 200 + * #define NY 300 + * #define CH_NX 10 + * #define CH_NY 15 + * + * int main(void) + * { + * hid_t file, dataspace, dataset, datatype, dset_create_props; + * hsize_t dims[2], chunk_size[2]; + * float orig_data[NX][NY]; + * float new_data[NX][NY]; + * float fill_val; + * int i, j; + * + * // Define dataset datatype + * datatype = H5Tcopy(H5T_NATIVE_FLOAT); + * + * // Initialize data buffer + * for (i=0; i < NX; i++) + * for (j=0; j < NY; j++) + * orig_data[i][j] = (rand() % 10000) / 1000.0; + * + * // Describe the size of the array. + * dims[0] = NX; + * dims[1] = NY; + * if((dataspace = H5Screate_simple (2, dims, NULL)) < 0) { + * printf("Error: fail to create dataspace\n"); + * return -1; + * } + * + * // Create a new file using read/write access, default file + * // creation properties, and default file access properties. 
+ * if((file = H5Fcreate (H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create file\n"); + * return -1; + * } + * + * // Set the dataset creation property list to specify that + * // the raw data is to be partitioned into 10 x 15 element + * // chunks and that each chunk is to be compressed. + * chunk_size[0] = CH_NX; + * chunk_size[1] = CH_NY; + * if((dset_create_props = H5Pcreate (H5P_DATASET_CREATE)) < 0) { + * printf("Error: fail to create dataset property\n"); + * return -1; + * } + * if(H5Pset_chunk (dset_create_props, 2, chunk_size) < 0) { + * printf("Error: fail to set chunk\n"); + * return -1; + * } + * + * // Set the fill value of dataset + * fill_val = 10000.0; + * if (H5Pset_fill_value(dset_create_props, H5T_NATIVE_FLOAT, &fill_val) < 0) { + * printf("Error: can not set fill value for dataset\n"); + * return -1; + * } + * + * // Set parameters for scale-offset compression; use variable + * // minimum-bits method, set decimal scale factor to 3. Check + * // the description of the H5Pset_scaleoffset function in the + * // HDF5 Reference Manual for more information. + * if(H5Pset_scaleoffset (dset_create_props, H5Z_SO_FLOAT_DSCALE, 3) < 0) { + * printf("Error: fail to set scaleoffset filter\n"); + * return -1; + * } + * + * // Create a new dataset within the file. The datatype + * // and dataspace describe the data on disk, which may + * // or may not be different from the format used in the + * // application's memory. + * if((dataset = H5Dcreate (file, DATASET_NAME, datatype, dataspace, H5P_DEFAULT, + * dset_create_props, H5P_DEFAULT)) < 0) { + * printf("Error: fail to create dataset\n"); + * return -1; + * } + * + * // Write the array to the file. The datatype and dataspace + * // describe the format of the data in the 'orig_data' buffer. + * // We use default raw data transfer properties. 
+ * if(H5Dwrite (dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, orig_data) < 0) { + * printf("Error: fail to write to dataset\n"); + * return -1; + * } + * + * H5Dclose (dataset); + * + * if((dataset = H5Dopen(file, DATASET_NAME, H5P_DEFAULT)) < 0) { + * printf("Error: fail to open dataset\n"); + * return -1; + * } + * + * // Read the array. This is similar to writing data, + * // except the data flows in the opposite direction. + * // Note: Decompression is automatic. + * if(H5Dread (dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, new_data) < 0) { + * printf("Error: fail to read from dataset\n"); + * return -1; + * } + * + * H5Tclose (datatype); + * H5Dclose (dataset); + * H5Sclose (dataspace); + * H5Pclose (dset_create_props); + * H5Fclose (file); + * + * return 0; + * } + * \endcode + * + *

    Limitations

    + * For floating-point data handling, there are some algorithmic limitations to the GRiB data packing + * mechanism: + *
1. + * Both the E-scaling and D-scaling methods are lossy compression methods + *
    2. + *
    3. + * For the D-scaling method, since data values have been rounded to integer values (positive) + * before truncating to the minimum-bits, their range is limited by the maximum value that can be + * represented by the corresponding unsigned integer type (the same size as that of the floating- + * point type) + *
    + * + *

    Suggestions

    + * The following are some suggestions for using the filter for floating-point data: + *
1. + * It is better to convert the units of data so that the units are within a certain common range (for + * example, 1200m to 1.2km) + *
    2. + *
    3. + * If data values to be compressed are very near to zero, it is strongly recommended that the + * user sets the fill value away from zero (for example, a large positive number); if the user does + * nothing, the HDF5 library will set the fill value to zero, and this may cause undesirable + * compression results + *
    4. + *
    5. + * Users are not encouraged to use a very large decimal scale factor (for example, 100) for the + * D-scaling method; this can cause the filter not to ignore the fill value when finding maximum + * and minimum values, and they will get a much larger minimum-bits (poor compression) + *
    + * + * \subsubsection subsubsec_dataset_filters_szip Using the Szip Filter + * See The HDF Group website for further information regarding the Szip filter. + * + * Previous Chapter \ref sec_group - Next Chapter \ref sec_datatype + * + */ + +/** + * \defgroup H5D Datasets (H5D) * * Use the functions in this module to manage HDF5 datasets, including the * transfer of data between memory and disk and the description of dataset diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 8126aff..6fad138 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -666,7 +666,7 @@ H5_DLL herr_t H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, u * \brief Iterate over all chunks of a chunked dataset * * \dset_id - * \param[in] dxpl_id Identifier of a transfer property list + * \param[in] dxpl_id Identifier of a transfer property list * \param[in] cb User callback function, called for every chunk. * \param[in] op_data User-defined pointer to data required by op * diff --git a/src/H5ESmodule.h b/src/H5ESmodule.h index 205089a..b05b7f4 100644 --- a/src/H5ESmodule.h +++ b/src/H5ESmodule.h @@ -28,7 +28,93 @@ #define H5_MY_PKG H5ES #define H5_MY_PKG_ERR H5E_EVENTSET -/**\defgroup H5ES H5ES +/** \page H5ES_UG The HDF5 Event Set + * @todo Under Construction + * + * \section sec_async The HDF5 Event Set Interface + * + * \section subsec_async_intro Introduction + * HDF5 provides asynchronous APIs for the HDF5 VOL connectors that support asynchronous HDF5 + * operations using the HDF5 Event Set (H5ES) API. This allows I/O to proceed in the background + * while the application is performing other tasks. + * + * To support AIO capabilities for the HDF5 VOL connectors, the AIO versions for the functions + * listed in the table below were added to HDF5 library version 1.13.0 and later. The async version + * of the function has “_async” suffix added to the function name. For example, the async version + * for H5Fcreate is H5Fcreate_async. 
+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    InterfaceFunctions
    H5F#H5Fcreate, #H5Fflush, #H5Fis_accessible, #H5Fopen, #H5Fclose + *
    H5G#H5Gcreate, #H5Gget_info, #H5Gget_info_by_idx, #H5Gget_info_by_name, #H5Gclose + *
    H5D#H5Dcreate, #H5Dopen, #H5Dset_extent, #H5Dwrite, #H5Dread, #H5Dget_space, #H5Dclose + *
    H5A#H5Acreate, #H5Acreate_by_name, #H5Aopen, #H5Aopen_by_name, #H5Aexists, #H5Awrite, #H5Aread, +#H5Aclose, #H5Aopen_by_idx, #H5Arename, #H5Arename_by_name + *
    H5L#H5Lcreate_hard, #H5Lcreate_soft, #H5Ldelete, #H5Ldelete_by_idx, #H5Lexists + *
    H5O#H5Ocopy, #H5Orefresh, #H5Oflush, #H5Oclose, #H5Oopen, #H5Oopen_by_idx + *
    H5R#H5Ropen_attr, #H5Ropen_object #H5Ropen_region, #H5Rdereference + *
    H5M#H5Mcreate, #H5Mopen, #H5Mput, #H5Mget, #H5Mclose + *
    H5T#H5Tcommit, #H5Topen, #H5Tcopy, #H5Tclose + *
    + * + * Async versions of the functions have an extra parameter called the event set parameter or es_id. + * For example, compare the signatures of #H5Dclose and #H5Dclose_async: + * \code + * herr_t H5Dclose(hid_t dset_id); + * herr_t H5Dclose_async(hid_t dset_id, hid_t es_id); + * \endcode + * + * An event set is an in-memory object that is created by an application and used to track many + * asynchronous operations with a single object. They function like a "bag" -- holding request + * tokens from one or more asynchronous operations and provide a simple interface for inspecting + * the status of the entire set of operations. + * + * See the \ref H5ES APIs that were added to the HDF5 library to manage event sets. + * + * Previous Chapter \ref sec_vol - Next Chapter \ref sec_map + * + */ + +/**\defgroup H5ES Event Set Interface (H5ES) * * \todo Add the event set life cycle. * diff --git a/src/H5Emodule.h b/src/H5Emodule.h index a2d59f3..0e4655c 100644 --- a/src/H5Emodule.h +++ b/src/H5Emodule.h @@ -28,30 +28,502 @@ #define H5_MY_PKG H5E #define H5_MY_PKG_ERR H5E_ERROR -/**\defgroup H5E H5E +/** \page H5E_UG HDF5 Error Handling * - * Use the functions in this module to manage HDF5 error stacks and error - * messages. + * \section sec_error HDF5 Error Handling + * + * The HDF5 library provides an error reporting mechanism for both the library itself and for user + * application programs. It can trace errors through function stack and error information like file + * name, function name, line number, and error description. + * + * \subsection subsec_error_intro Introduction + * The HDF5 Library provides an error reporting mechanism for both the library itself and for user application + * programs. It can trace errors through function stack and error information like file name, function name, + * line number, and error description. 
+ * + * \ref subsec_error_ops discusses the basic error concepts such as error stack, error record, and error + * message and describes the related API functions. These concepts and functions are sufficient for + * application programs to trace errors inside the HDF5 Library. + * + * \ref subsec_error_adv talks about the advanced concepts of error + * class and error stack handle and talks about the related functions. With these concepts and functions, an + * application library or program using the HDF5 Library can have its own error report blended with HDF5’s + * error report. + * + * Starting with Release 1.8, we have a new set of Error Handling API functions. For the purpose of backward + * compatibility with version 1.6 and before, we still keep the old API functions, \ref H5Epush1, + * \ref H5Eprint1, \ref H5Ewalk1, \ref H5Eclear1, \ref H5Eget_auto1, \ref H5Eset_auto1. These functions do + * not have the error stack as a parameter. The library allows them to operate on the default error stack. + * (The H5E compatibility macros will choose the correct function based on the parameters) + * + * The old API is similar to functionality discussed in \ref subsec_error_ops. The functionality discussed in + * \ref subsec_error_adv,the ability of allowing applications to add their own error records, is the new + * design for the Error Handling API. + * + * \subsection subsec_error_H5E Error Handling Function Summaries + * @see H5E reference manual + * + * \subsection subsec_error_program Programming Model for Error Handling + * This section is under construction. + * + * \subsection subsec_error_ops Basic Error Handling Operations + * Let us first try to understand the error stack. An error stack is a collection of error records. Error + * records can be pushed onto or popped off the error stack. By default, when an error occurs deep within + * the HDF5 Library, an error record is pushed onto an error stack and that function returns a failure + * indication. 
+ * Its caller detects the failure, pushes another record onto the stack, and returns a failure indication. + * This continues until the API function called by the application returns a failure indication. The next + * API function being called will reset the error stack. All HDF5 Library error records belong to the same + * error class. For more information, see \ref subsec_error_adv. + * + * \subsubsection subsubsec_error_ops_stack Error Stack and Error Message + * In normal circumstances, an error causes the stack to be printed on the standard error stream + * automatically. + * This automatic error stack is the library’s default stack. For all the functions in this section, whenever + * an error stack ID is needed as a parameter, \ref H5E_DEFAULT can be used to indicate the library’s default + * stack. The first error record of the error stack, number #000, is produced by the API function itself and + * is usually sufficient to indicate to the application what went wrong. + * + * + * + * + * + *
    Example: An Error Message
    + *

    If an application calls \ref H5Tclose on a + * predefined datatype then the following message is + * printed on the standard error stream. This is a + * simple error that has only one component, the API + * function; other errors may have many components. + *

    + * HDF5-DIAG: Error detected in HDF5 (1.10.9) thread 0.
    + *    #000: H5T.c line ### in H5Tclose(): predefined datatype
    + *       major: Function argument
    + *       minor: Bad value
    + *         
    + *
    + * In the example above, we can see that an error record has a major message and a minor message. A major + * message generally indicates where the error happens. The location can be a dataset or a dataspace, for + * example. A minor message explains further details of the error. An example is “unable to open file”. + * Another specific detail about the error can be found at the end of the first line of each error record. + * This error description is usually added by the library designer to tell what exactly goes wrong. In the + * example above, the “predefined datatype” is an error description. + * + * \subsubsection subsubsec_error_ops_print Print and Clear an Error Stack + * Besides the automatic error report, the error stack can also be printed and cleared by the functions + * \ref H5Eprint2 and \ref H5Eclear2. If an application wishes to make explicit + * calls to \ref H5Eprint2 to print the error stack, the automatic printing should be turned off + * to prevent error messages from being displayed twice (see \ref H5Eset_auto2). + * + * To print an error stack: + * \code + * herr_t H5Eprint2(hid_t error_stack, FILE * stream) + * \endcode + * This function prints the error stack specified by error_stack on the specified stream, stream. If the + * error stack is empty, a one‐line message will be printed. The following is an example of such a message. + * This message would be generated if the error was in the HDF5 Library. + * \code + * HDF5-DIAG: Error detected in HDF5 Library version: 1.10.9 thread 0. + * \endcode + * + * To clear an error stack: + * \code + * herr_t H5Eclear2(hid_t error_stack) + * \endcode + * The \ref H5Eclear2 function shown above clears the error stack specified by error_stack. + * \ref H5E_DEFAULT can be passed in to clear the current error stack. The current stack is also cleared + * whenever an API function is called; there are certain exceptions to this rule such as \ref H5Eprint2. 
+ * + * \subsubsection subsubsec_error_ops_mute Mute Error Stack + * Sometimes an application calls a function for the sake of its return value, fully expecting the function + * to fail; sometimes the application wants to call \ref H5Eprint2 explicitly. In these situations, + * it would be misleading if an error message were still automatically printed. Using the + * \ref H5Eset_auto2 function can control the automatic printing of error messages. + * + * To enable or disable automatic printing of errors: + * \code + * herr_t H5Eset_auto2(hid_t error_stack, H5E_auto2_t func, void *client_data) + * \endcode + * The \ref H5Eset_auto2 function can be used to turn on or off the automatic printing of errors + * for the error stack specified by error_stack. When turned on (non‐null func pointer), any API function + * which returns an error indication will first call func, passing it client_data as an argument. When the + * library is first initialized the auto printing function is set to \ref H5Eprint2 and client_data + * is the standard error stream pointer, stderr. + * + * To see the current settings: + * \code + * herr_t H5Eget_auto2(hid_t error_stack, H5E_auto2_t *func, void **client_data) + * \endcode + * The function above returns the current settings for the automatic error stack traversal function, func, and + * its data, client_data. If either or both of the arguments are null, then the value is not returned. + * + * An application can temporarily turn off error messages while “probing” a function. See the + * example below. + * + * Example: Turn off error messages while probing a function + * \code + * *** Save old error handler *** + * H5E_auto2_t old_func; + * void *old_client_data; + * H5Eget_auto2(error_stack, &old_func, &old_client_data); + * *** Turn off error handling *** + * H5Eset_auto2(error_stack, NULL, NULL); + * *** Probe. 
Likely to fail, but that’s okay *** + * status = H5Fopen (......); + * *** Restore previous error handler *** + * H5Eset_auto2(error_stack, old_func, old_client_data); + * \endcode + * + * Or automatic printing can be disabled altogether and error messages can be explicitly printed. + * + * Example: Disable automatic printing and explicitly print error messages + * \code + * *** Turn off error handling permanently *** + * H5Eset_auto2(error_stack, NULL, NULL); + * *** If failure, print error message *** + * if (H5Fopen (....)<0) { + * H5Eprint2(H5E_DEFAULT, stderr); + * exit (1); + * } + * \endcode + * + * \subsubsection subsubsec_error_ops_custom_print Customized Printing of an Error Stack + * Applications are allowed to define an automatic error traversal function other than the default + * \ref H5Eprint(). For instance, one can define a function that prints a simple, one‐line error message to + * the standard error stream and then exits. The first example below defines a such a function. The second + * example below installs the function as the error handler. + * + * Example: Defining a function to print a simple error message + * \code + * herr_t + * my_hdf5_error_handler(void *unused) + * { + * fprintf (stderr, “An HDF5 error was detected. Bye.\\n”); + * exit (1); + * } + * \endcode + * + * Example: The user‐defined error handler + * \code + * H5Eset_auto2(H5E_DEFAULT, my_hdf5_error_handler, NULL); + * \endcode + * + * \subsubsection subsubsec_error_ops_walk Walk through the Error Stack + * The \ref H5Eprint2 function is actually just a wrapper around the more complex \ref H5Ewalk function + * which traverses an error stack and calls a user‐defined function for each member of the stack. The example + * below shows how \ref H5Ewalk is used. + * \code + * herr_t H5Ewalk(hid_t err_stack, H5E_direction_t direction, + * H5E_walk_t func, void *client_data) + * \endcode + * The error stack err_stack is traversed and func is called for each member of the stack. 
Its arguments + * are an integer sequence number beginning at zero (regardless of direction) and the client_data + * pointer. If direction is \ref H5E_WALK_UPWARD, then traversal begins at the inner‐most function that + * detected the error and concludes with the API function. Use \ref H5E_WALK_DOWNWARD for the opposite + * order. + * + * \subsubsection subsubsec_error_ops_travers Traverse an Error Stack with a Callback Function + * An error stack traversal callback function takes three arguments: n is a sequence number beginning at + * zero for each traversal, eptr is a pointer to an error stack member, and client_data is the same pointer + * used in the example above passed to \ref H5Ewalk. See the example below. + * \code + * typedef herr_t (*H5E_walk_t)(unsigned n, H5E_error2_t *eptr, void *client_data) + * \endcode + * The H5E_error2_t structure is shown below. + * \code + * typedef struct { + * hid_t cls_id; + * hid_t maj_num; + * hid_t min_num; + * unsigned line; + * const char *func_name; + * const char *file_name; + * const char *desc; + * } H5E_error2_t; + * \endcode + * The maj_num and min_num are major and minor error IDs, func_name is the name of the function where + * the error was detected, file_name and line locate the error within the HDF5 Library source code, and + * desc points to a description of the error. + * + * The following example shows a user‐defined callback function. 
+ * + * Example: A user‐defined callback function + * \code + * \#define MSG_SIZE 64 + * herr_t + * custom_print_cb(unsigned n, const H5E_error2_t *err_desc, void *client_data) + * { + * FILE *stream = (FILE *)client_data; + * char maj[MSG_SIZE]; + * char min[MSG_SIZE]; + * char cls[MSG_SIZE]; + * const int indent = 4; + * + * *** Get descriptions for the major and minor error numbers *** + * if(H5Eget_class_name(err_desc->cls_id, cls, MSG_SIZE) < 0) + * TEST_ERROR; + * if(H5Eget_msg(err_desc->maj_num, NULL, maj, MSG_SIZE) < 0) + * TEST_ERROR; + * if(H5Eget_msg(err_desc->min_num, NULL, min, MSG_SIZE) < 0) + * TEST_ERROR; + * fprintf (stream, “%*serror #%03d: %s in %s(): + * line %u\\n”, + * indent, “”, n, err_desc->file_name, + * err_desc->func_name, err_desc->line); + * fprintf (stream, “%*sclass: %s\\n”, indent*2, “”, cls); + * fprintf (stream, “%*smajor: %s\\n”, indent*2, “”, maj); + * fprintf (stream, “%*sminor: %s\\n”, indent*2, “”, min); + * return 0; + * error: + * return -1; + * } + * \endcode + * + *

    Programming Note for C++ Developers Using C Functions

    + * If a C routine that takes a function pointer as an argument is called from within C++ code, the C routine + * should be returned from normally. + * + * Examples of this kind of routine include callbacks such as \ref H5Pset_elink_cb and + * \ref H5Pset_type_conv_cb and + * functions such as \ref H5Tconvert and \ref H5Ewalk2. + * + * Exiting the routine in its normal fashion allows the HDF5 C Library to clean up its work properly. In other + * words, if the C++ application jumps out of the routine back to the C++ “catch” statement, the library is + * not given the opportunity to close any temporary data structures that were set up when the routine was + * called. The C++ application should save some state as the routine is started so that any problem that + * occurs might be diagnosed. + * + * \subsection subsec_error_adv Advanced Error Handling Operations + * The section above, see \ref subsec_error_ops, discusses the basic error + * handling operations of the library. In that section, all the error records on the error stack are from the + * library itself. In this section, we are going to introduce the operations that allow an application program + * to push its own error records onto the error stack once it declares an error class of its own through the + * HDF5 Error API. * * - * - * - * - * - * - * - * - * - * + * + * + * + * *
    CreateRead
    - * \snippet{lineno} H5E_examples.c create - * - * \snippet{lineno} H5E_examples.c read - *
    UpdateDelete
    - * \snippet{lineno} H5E_examples.c update - * - * \snippet{lineno} H5E_examples.c delete - *
    Example: An Error Report
    + *

    An error report shows both the library’s error record and the application’s error records. + * See the example below. + *

    + * Error Test-DIAG: Error detected in Error Program (1.0)
    + *         thread 8192:
    + *     #000: ../../hdf5/test/error_test.c line ### in main():
    + *         Error test failed
    + *       major: Error in test
    + *       minor: Error in subroutine
    + *     #001: ../../hdf5/test/error_test.c line ### in
    + *         test_error(): H5Dwrite failed as supposed to
    + *       major: Error in IO
    + *       minor: Error in H5Dwrite
    + *   HDF5-DIAG: Error detected in HDF5 (1.10.9) thread #####:
    + *     #002: ../../hdf5/src/H5Dio.c line ### in H5Dwrite():
    + *         not a dataset
    + *       major: Invalid arguments to routine
    + *       minor: Inappropriate type
    + *       
    + *
    + * In the line above error record #002 in the example above, the starting phrase is HDF5. This is the error + * class name of the HDF5 Library. All of the library’s error messages (major and minor) are in this default + * error class. The Error Test in the beginning of the line above error record #000 is the name of the + * application’s error class. The first two error records, #000 and #001, are from application’s error class. + * By definition, an error class is a group of major and minor error messages for a library (the HDF5 Library + * or an application library built on top of the HDF5 Library) or an application program. The error class can + * be registered for a library or program through the HDF5 Error API. Major and minor messages can be defined + * in an error class. An application will have object handles for the error class and for major and minor + * messages for further operation. See the example below. + * + * Example: The user‐defined error handler + * \code + * \#define MSG_SIZE 64 + * herr_t + * custom_print_cb(unsigned n, const H5E_error2_t *err_desc, + * void* client_data) + * { + * FILE *stream = (FILE *)client_data; + * char maj[MSG_SIZE]; + * char min[MSG_SIZE]; + * char cls[MSG_SIZE]; + * const int indent = 4; + * + * *** Get descriptions for the major and minor error numbers *** + * if(H5Eget_class_name(err_desc->cls_id, cls, MSG_SIZE) < 0) + * TEST_ERROR; + * if(H5Eget_msg(err_desc->maj_num, NULL, maj, MSG_SIZE) < 0) + * TEST_ERROR; + * if(H5Eget_msg(err_desc->min_num, NULL, min, MSG_SIZE) < 0) + * TEST_ERROR; + * fprintf (stream, “%*serror #%03d: %s in %s(): + * line %u\\n”, + * indent, “”, n, err_desc->file_name, + * err_desc->func_name, err_desc->line); + * fprintf (stream, “%*sclass: %s\\n”, indent*2, “”, cls); + * fprintf (stream, “%*smajor: %s\\n”, indent*2, “”, maj); + * fprintf (stream, “%*sminor: %s\\n”, indent*2, “”, min); + * return 0; + * error: + * return -1; + * } + * \endcode + * + * \subsubsection 
subsubsec_error_adv_more More Error API Functions + * The Error API has functions that can be used to register or unregister an error class, to create or close + * error messages, and to query an error class or error message. These functions are illustrated below. + * + * To register an error class: + * \code + * hid_t H5Eregister_class(const char* cls_name, const char* lib_name, const char* version) + * \endcode + * This function registers an error class with the HDF5 Library so that the application library or program + * can report errors together with the HDF5 Library. + * + * To add an error message to an error class: + * \code + * hid_t H5Ecreate_msg(hid_t class, H5E_type_t msg_type, const char* mesg) + * \endcode + * This function adds an error message to an error class defined by an application library or program. The + * error message can be either major or minor which is indicated by parameter msg_type. + * + * To get the name of an error class: + * \code + * ssize_t H5Eget_class_name(hid_t class_id, char* name, size_t size) + * \endcode + * This function retrieves the name of the error class specified by the class ID. + * + * To retrieve an error message: + * \code + * ssize_t H5Eget_msg(hid_t mesg_id, H5E_type_t* mesg_type, char* mesg, size_t size) + * \endcode + * This function retrieves the error message including its length and type. + * + * To close an error message: + * \code + * herr_t H5Eclose_msg(hid_t mesg_id) + * \endcode + * This function closes an error message. + * + * To remove an error class: + * \code + * herr_t H5Eunregister_class(hid_t class_id) + * \endcode + * This function removes an error class from the Error API. + * + * The example below shows how an application creates an error class and error messages. 
+ * + * Example: Create an error class and error messages + * \code + * *** Create an error class *** + * class_id = H5Eregister_class(ERR_CLS_NAME, PROG_NAME, PROG_VERS); + * *** Retrieve class name *** + * H5Eget_class_name(class_id, cls_name, cls_size); + * *** Create a major error message in the class *** + * maj_id = H5Ecreate_msg(class_id, H5E_MAJOR, “... ...”); + * *** Create a minor error message in the class *** + * min_id = H5Ecreate_msg(class_id, H5E_MINOR, “... ...”); + * \endcode + * + * The example below shows how an application closes error messages and unregisters the error class. + * + * Example: Closing error messages and unregistering the error class + * \code + * H5Eclose_msg(maj_id); + * H5Eclose_msg(min_id); + * H5Eunregister_class(class_id); + * \endcode + * + * \subsubsection subsubsec_error_adv_app Pushing an Application Error Message onto Error Stack + * An application can push error records onto or pop error records off of the error stack just as the library + * does internally. An error stack can be registered, and an object handle can be returned to the application + * so that the application can manipulate a registered error stack. + * + * To register the current stack: + * \code + * hid_t H5Eget_current_stack(void) + * \endcode + * This function registers the current error stack, returns an object handle, and clears the current error + * stack. + * An empty error stack will also be assigned an ID. + * + * To replace the current error stack with another: + * \code + * herr_t H5Eset_current_stack(hid_t error_stack) + * \endcode + * This function replaces the current error stack with another error stack specified by error_stack and + * clears the current error stack. The object handle error_stack is closed after this function call. 
+ * + * To push a new error record to the error stack: + * \code + * herr_t H5Epush(hid_t error_stack, const char* file, const char* func, + * unsigned line, hid_t cls_id, hid_t major_id, hid_t minor_id, + * const char* desc, ... ) + * \endcode + * This function pushes a new error record onto the error stack for the current thread. + * + * To delete some error messages: + * \code + * herr_t H5Epop(hid_t error_stack, size_t count) + * \endcode + * This function deletes some error messages from the error stack. + * + * To retrieve the number of error records: + * \code + * int H5Eget_num(hid_t error_stack) + * \endcode + * This function retrieves the number of error records from an error stack. + * + * To clear the error stack: + * \code + * herr_t H5Eclear_stack(hid_t error_stack) + * \endcode + * This function clears the error stack. + * + * To close the object handle for an error stack: + * \code + * herr_t H5Eclose_stack(hid_t error_stack) + * \endcode + * This function closes the object handle for an error stack and releases its resources. + * + * The example below shows how an application pushes an error record onto the default error stack. + * + * Example: Pushing an error message to an error stack + * \code + * *** Make call to HDF5 I/O routine *** + * if((dset_id=H5Dopen(file_id, dset_name, access_plist)) < 0) + * { + * *** Push client error onto error stack *** + * H5Epush(H5E_DEFAULT,__FILE__,FUNC,__LINE__,cls_id, + * CLIENT_ERR_MAJ_IO,CLIENT_ERR_MINOR_OPEN, “H5Dopen failed”); + * } + * *** Indicate error occurred in function *** + * return 0; + * \endcode + * + * The example below shows how an application registers the current error stack and + * creates an object handle to avoid another HDF5 function from clearing the error stack. 
+ * + * Example: Registering the error stack + * \code + * if (H5Dwrite(dset_id, mem_type_id, mem_space_id, file_space_id, dset_xfer_plist_id, buf) < 0) + * { + * *** Push client error onto error stack *** + * H5Epush2(H5E_DEFAULT,__FILE__,FUNC,__LINE__,cls_id, + * CLIENT_ERR_MAJ_IO,CLIENT_ERR_MINOR_HDF5, + * “H5Dwrite failed”); + * *** Preserve the error stack by assigning an object handle to it *** + * error_stack = H5Eget_current_stack(); + * *** Close dataset *** + * H5Dclose(dset_id); + * *** Replace the current error stack with the preserved one *** + * H5Eset_current_stack(error_stack); + * } + * return 0; + * \endcode + * + * Previous Chapter \ref sec_attribute - Next Chapter \ref sec_plist + * + * \defgroup H5E Error Handling (H5E) * * \internal The \c FUNC_ENTER macro clears the error stack whenever an * interface function is entered. When an error is detected, an entry @@ -76,6 +548,8 @@ * error stack. The error stack is statically allocated to reduce the * complexity of handling errors within the \ref H5E package. * + * @see sec_error + * */ #endif /* H5Emodule_H */ diff --git a/src/H5Epublic.h b/src/H5Epublic.h index 0254c37..6e47d28 100644 --- a/src/H5Epublic.h +++ b/src/H5Epublic.h @@ -899,8 +899,8 @@ H5_DLL herr_t H5Ewalk1(H5E_direction_t direction, H5E_walk1_t func, void *client * * \deprecated 1.8.0 Function deprecated in this release. * - * \details Given a major error number, H5Eget_major() returns a constant - * character string that describes the error. + * \details H5Eget_major() returns a constant + * character string that describes the error, given a major error number. * * \attention This function returns a dynamically allocated string (\c char * array). An application calling this function must free the memory @@ -920,8 +920,8 @@ H5_DLL char *H5Eget_major(H5E_major_t maj); * * \deprecated 1.8.0 Function deprecated and return type changed in this release. 
* - * \details Given a minor error number, H5Eget_minor() returns a constant - * character string that describes the error. + * \details H5Eget_minor() returns a constant + * character string that describes the error, given a minor error number. * * \attention In the Release 1.8.x series, H5Eget_minor() returns a string of * dynamic allocated \c char array. An application calling this diff --git a/src/H5Fmodule.h b/src/H5Fmodule.h index 6047693..867ef0e 100644 --- a/src/H5Fmodule.h +++ b/src/H5Fmodule.h @@ -28,7 +28,1448 @@ #define H5_MY_PKG H5F #define H5_MY_PKG_ERR H5E_FILE -/**\defgroup H5F H5F +/** \page H5F_UG The HDF5 File + * + * \section sec_file The HDF5 File + * \subsection subsec_file_intro Introduction + * The purpose of this chapter is to describe how to work with HDF5 data files. + * + * If HDF5 data is to be written to or read from a file, the file must first be explicitly created or + * opened with the appropriate file driver and access privileges. Once all work with the file is + * complete, the file must be explicitly closed. + * + * This chapter discusses the following: + * \li File access modes + * \li Creating, opening, and closing files + * \li The use of file creation property lists + * \li The use of file access property lists + * \li The use of low-level file drivers + * + * This chapter assumes an understanding of the material presented in the data model chapter. For + * more information, @see @ref sec_data_model. + * + * \subsection subsec_file_access_modes File Access Modes + * There are two issues regarding file access: + *
    • What should happen when a new file is created but a file of the same name already + * exists? Should the create action fail, or should the existing file be overwritten?
    + *
    • Is a file to be opened with read-only or read-write access?
    + * + * Four access modes address these concerns. Two of these modes can be used with #H5Fcreate, and + * two modes can be used with #H5Fopen. + * \li #H5Fcreate accepts #H5F_ACC_EXCL or #H5F_ACC_TRUNC + * \li #H5Fopen accepts #H5F_ACC_RDONLY or #H5F_ACC_RDWR + * + * The access modes are described in the table below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Access flags and modes
    Access FlagResulting Access Mode
    #H5F_ACC_EXCLIf the file already exists, #H5Fcreate fails. If the file does not exist, + * it is created and opened with read-write access. (Default)
    #H5F_ACC_TRUNCIf the file already exists, the file is opened with read-write access, + * and new data will overwrite any existing data. If the file does not exist, + * it is created and opened with read-write access.
    #H5F_ACC_RDONLYAn existing file is opened with read-only access. If the file does not + * exist, #H5Fopen fails. (Default)
    #H5F_ACC_RDWRAn existing file is opened with read-write access. If the file does not + * exist, #H5Fopen fails.
    + * + * By default, #H5Fopen opens a file for read-only access; passing #H5F_ACC_RDWR allows + * read-write access to the file. + * + * By default, #H5Fcreate fails if the file already exists; only passing #H5F_ACC_TRUNC allows + * the truncating of an existing file. + * + * \subsection subsec_file_creation_access File Creation and File Access Properties + * File creation and file access property lists control the more complex aspects of creating and + * accessing files. + * + * File creation property lists control the characteristics of a file such as the size of the userblock, + * a user-definable data block; the size of data address parameters; properties of the B-trees that are + * used to manage the data in the file; and certain HDF5 Library versioning information. + * + * For more information, @see @ref subsubsec_file_property_lists_props. + * + * This section has a more detailed discussion of file creation properties. If you have no special + * requirements for these file characteristics, you can simply specify #H5P_DEFAULT for the default + * file creation property list when a file creation property list is called for. + * + * File access property lists control properties and means of accessing a file such as data alignment + * characteristics, metadata block and cache sizes, data sieve buffer size, garbage collection + * settings, and parallel I/O. Data alignment, metadata block and cache sizes, and data sieve buffer + * size are factors in improving I/O performance. + * + * For more information, @see @ref subsubsec_file_property_lists_access. + * + * This section has a more detailed discussion of file access properties. If you have no special + * requirements for these file access characteristics, you can simply specify #H5P_DEFAULT for the + * default file access property list when a file access property list is called for. + * + * + * + * + * + * + *
    Figure 10 - UML model for an HDF5 file and its property lists
    + * \image html UML_FileAndProps.gif "UML model for an HDF5 file and its property lists" + *
    + * + * \subsection subsec_file_drivers Low-level File Drivers + * The concept of an HDF5 file is actually rather abstract: the address space for what is normally + * thought of as an HDF5 file might correspond to any of the following at the storage level: + * \li Single file on a standard file system + * \li Multiple files on a standard file system + * \li Multiple files on a parallel file system + * \li Block of memory within an application’s memory space + * \li More abstract situations such as virtual files + * + * This HDF5 address space is generally referred to as an HDF5 file regardless of its organization at + * the storage level. + * + * HDF5 accesses a file (the address space) through various types of low-level file drivers. The + * default HDF5 file storage layout is as an unbuffered permanent file which is a single, contiguous + * file on local disk. Alternative layouts are designed to suit the needs of a variety of systems, + * environments, and applications. + * + * \subsection subsec_file_program_model Programming Model for Files + * Programming models for creating, opening, and closing HDF5 files are described in the + * sub-sections below. + * + * \subsubsection subsubsec_file_program_model_create Creating a New File + * The programming model for creating a new HDF5 file can be summarized as follows: + * \li Define the file creation property list + * \li Define the file access property list + * \li Create the file + * + * First, consider the simple case where we use the default values for the property lists. See the + * example below. + * + * Creating an HDF5 file using property list defaults + * \code + * file_id = H5Fcreate ("SampleFile.h5", H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT) + * \endcode + * + * Note: The example above specifies that #H5Fcreate should fail if SampleFile.h5 already exists. + * + * A more complex case is shown in the example below. 
In this example, we define file creation + * and access property lists (though we do not assign any properties), specify that #H5Fcreate + * should fail if SampleFile.h5 already exists, and create a new file named SampleFile.h5. The example + * does not specify a driver, so the default driver, #H5FD_SEC2, will be used. + * + * Creating an HDF5 file using property lists + * \code + * fcplist_id = H5Pcreate (H5P_FILE_CREATE) + * <...set desired file creation properties...> + * faplist_id = H5Pcreate (H5P_FILE_ACCESS) + * <...set desired file access properties...> + * file_id = H5Fcreate ("SampleFile.h5", H5F_ACC_EXCL, fcplist_id, faplist_id) + * \endcode + * Notes: + * 1. A root group is automatically created in a file when the file is first created. + * + * 2. File property lists, once defined, can be reused when another file is created within the same + * application. + * + * \subsubsection subsubsec_file_program_model_open Opening an Existing File + * The programming model for opening an existing HDF5 file can be summarized as follows: + *
    • Define or modify the file access property list including a low-level file driver (optional)
    + *
    • Open the file
    + * + * The code in the example below shows how to open an existing file with read-only access. + * + * Opening an HDF5 file + * \code + * faplist_id = H5Pcreate (H5P_FILE_ACCESS) + * status = H5Pset_fapl_stdio (faplist_id) + * file_id = H5Fopen ("SampleFile.h5", H5F_ACC_RDONLY, faplist_id) + * \endcode + * + * \subsubsection subsubsec_file_program_model_close Closing a File + * The programming model for closing an HDF5 file is very simple: + * \li Close file + * + * We close SampleFile.h5 with the code in the example below. + * + * Closing an HDF5 file + * \code + * status = H5Fclose (file_id) + * \endcode + * Note that #H5Fclose flushes all unwritten data to storage and that file_id is the identifier returned + * for SampleFile.h5 by #H5Fopen. + * + * More comprehensive discussions regarding all of these steps are provided below. + * + * \subsection subsec_file_h5dump Using h5dump to View a File + * h5dump is a command-line utility that is included in the HDF5 distribution. This program + * provides a straight-forward means of inspecting the contents of an HDF5 file. You can use + * h5dump to verify that a program is generating the intended HDF5 file. h5dump displays ASCII + * output formatted according to the HDF5 DDL grammar. + * + * The following h5dump command will display the contents of SampleFile.h5: + * \code + * h5dump SampleFile.h5 + * \endcode + * + * If no datasets or groups have been created in and no data has been written to the file, the output + * will look something like the following: + * \code + * HDF5 "SampleFile.h5" { + * GROUP "/" { + * } + * } + * \endcode + * + * Note that the root group, indicated above by /, was automatically created when the file was created. + * + * h5dump is described on the + * Tools + * page under + * + * Libraries and Tools Reference. + * The HDF5 DDL grammar is described in the document \ref DDLBNF110. 
+ * + * \subsection subsec_file_summary File Function Summaries + * General library (\ref H5 functions and macros), (\ref H5F functions), file related + * (\ref H5P functions), and file driver (\ref H5P functions) are listed below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    General library functions and macros
    FunctionPurpose
    #H5check_versionVerifies that HDF5 library versions are consistent.
    #H5closeFlushes all data to disk, closes all open identifiers, and cleans up memory.
    #H5dont_atexitInstructs the library not to install the atexit cleanup routine.
    #H5garbage_collectGarbage collects on all free-lists of all types.
    #H5get_libversionReturns the HDF library release number.
    #H5openInitializes the HDF5 library.
    #H5set_free_list_limitsSets free-list size limits.
    #H5_VERSION_GEDetermines whether the version of the library being used is greater than or equal + * to the specified version.
    #H5_VERSION_LEDetermines whether the version of the library being used is less than or equal + * to the specified version.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    File functions
    FunctionPurpose
    #H5Fclear_elink_file_cacheClears the external link open file cache for a file.
    #H5FcloseCloses HDF5 file.
    #H5FcreateCreates new HDF5 file.
    #H5FflushFlushes data to HDF5 file on storage medium.
    #H5Fget_access_plistReturns a file access property list identifier.
    #H5Fget_create_plistReturns a file creation property list identifier.
    #H5Fget_file_imageRetrieves a copy of the image of an existing, open file.
    #H5Fget_filesizeReturns the size of an HDF5 file.
    #H5Fget_freespaceReturns the amount of free space in a file.
    #H5Fget_infoReturns global information for a file.
    #H5Fget_intentDetermines the read/write or read-only status of a file.
    #H5Fget_mdc_configObtain current metadata cache configuration for target file.
    #H5Fget_mdc_hit_rateObtain target file’s metadata cache hit rate.
    #H5Fget_mdc_sizeObtain current metadata cache size data for specified file.
    #H5Fget_mpi_atomicityRetrieves the atomicity mode in use.
    #H5Fget_nameRetrieves the name of the file to which the object belongs.
    #H5Fget_obj_countReturns the number of open object identifiers for an open file.
    #H5Fget_obj_idsReturns a list of open object identifiers.
    #H5Fget_vfd_handleReturns pointer to the file handle from the virtual file driver.
    #H5Fis_hdf5Determines whether a file is in the HDF5 format.
    #H5FmountMounts a file.
    #H5FopenOpens an existing HDF5 file.
    #H5FreopenReturns a new identifier for a previously-opened HDF5 file.
    #H5Freset_mdc_hit_rate_statsReset hit rate statistics counters for the target file.
    #H5Fset_mdc_configUse to configure metadata cache of target file.
    #H5Fset_mpi_atomicityUse to set the MPI atomicity mode.
    #H5FunmountUnmounts a file.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    File creation property list functions
    FunctionPurpose
    #H5Pset_userblock/#H5Pget_userblockSets/retrieves size of userblock.
    #H5Pset_sizes/#H5Pget_sizesSets/retrieves byte size of offsets and lengths used to address objects in HDF5 file.
    #H5Pset_sym_k/#H5Pget_sym_kSets/retrieves size of parameters used to control symbol table nodes.
    #H5Pset_istore_k/#H5Pget_istore_kSets/retrieves size of parameter used to control B-trees for indexing chunked datasets.
    #H5Pset_file_imageSets an initial file image in a memory buffer.
    #H5Pget_file_imageRetrieves a copy of the file image designated as the initial content and structure of a file.
    #H5Pset_shared_mesg_nindexes/#H5Pget_shared_mesg_nindexesSets or retrieves number of shared object header message indexes in file + * creation property list.
    #H5Pset_shared_mesg_indexConfigures the specified shared object header message index.
    #H5Pget_shared_mesg_indexRetrieves the configuration settings for a shared message index.
    #H5Pset_shared_mesg_phase_change/#H5Pget_shared_mesg_phase_changeSets or retrieves shared object header message storage phase change thresholds.
    #H5Pget_versionRetrieves version information for various objects for a file creation property list.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    File access property list functions
    FunctionPurpose
    #H5Pset_alignment/#H5Pget_alignmentSets/retrieves alignment properties.
    #H5Pset_cache/#H5Pget_cacheSets/retrieves metadata cache and raw data chunk cache parameters.
    #H5Pset_elink_file_cache_size/#H5Pget_elink_file_cache_sizeSets/retrieves the size of the external link open file cache from the specified + * file access property list.
    #H5Pset_gc_references/#H5Pget_gc_referencesSets/retrieves garbage collecting references flag.
    #H5Pset_family_offsetSets offset property for low-level access to a file in a family of files.
    #H5Pget_family_offsetRetrieves a data offset from the file access property list.
    #H5Pset_meta_block_size/#H5Pget_meta_block_sizeSets the minimum metadata blocksize or retrieves the current metadata block size setting.
    #H5Pset_mdc_configSet the initial metadata cache configuration in the indicated File Access Property List + * to the supplied value.
    #H5Pget_mdc_configGet the current initial metadata cache configuration from the indicated File Access + * Property List.
    #H5Pset_sieve_buf_size/#H5Pget_sieve_buf_sizeSets/retrieves maximum size of data sieve buffer.
    #H5Pset_libver_boundsSets bounds on library versions, and indirectly format versions, to be used + * when creating objects.
    #H5Pget_libver_boundsRetrieves library version bounds settings that indirectly control the format + * versions used when creating objects.
    #H5Pset_small_data_block_sizeSets the size of a contiguous block reserved for small data.
    #H5Pget_small_data_block_sizeRetrieves the current small data block size setting.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    File driver functions
    FunctionPurpose
    #H5Pset_driverSets a file driver.
    #H5Pget_driverReturns the identifier for the driver used to create a file.
    #H5Pget_driver_infoReturns a pointer to file driver information.
    #H5Pset_fapl_core/#H5Pget_fapl_coreSets the driver for buffered memory files (in RAM) or retrieves information regarding + * the driver.
    #H5Pset_fapl_direct/#H5Pget_fapl_directSets up use of the direct I/O driver or retrieves the direct I/O driver settings.
    #H5Pset_fapl_family/#H5Pget_fapl_familySets driver for file families, designed for systems that do not support files + * larger than 2 gigabytes, or retrieves information regarding driver.
    #H5Pset_fapl_logSets logging driver.
    #H5Pset_fapl_mpio/#H5Pget_fapl_mpioSets driver for files on parallel file systems (MPI I/O) or retrieves information + * regarding the driver.
    H5Pset_fapl_mpiposix/H5Pget_fapl_mpiposixNo longer available.
    #H5Pset_fapl_multi/#H5Pget_fapl_multiSets driver for multiple files, separating categories of metadata and raw data, + * or retrieves information regarding driver.
    #H5Pset_fapl_sec2Sets driver for unbuffered permanent files.
    #H5Pset_fapl_splitSets driver for split files, a limited case of multiple files with one metadata file + * and one raw data file.
    #H5Pset_fapl_stdioSets driver for buffered permanent files.
    #H5Pset_fapl_windowsSets the Windows I/O driver.
    #H5Pset_multi_typeSpecifies type of data to be accessed via the MULTI driver enabling more direct access.
    #H5Pget_multi_typeRetrieves type of data property for MULTI driver.
    + * + * \subsection subsec_file_create Creating or Opening an HDF5 File + * This section describes in more detail how to create and how to open files. + * + * New HDF5 files are created and opened with #H5Fcreate; existing files are opened with + * #H5Fopen. Both functions return an object identifier which must eventually be released by calling + * #H5Fclose. + * + * To create a new file, call #H5Fcreate: + * \code + * hid_t H5Fcreate (const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id) + * \endcode + * + * #H5Fcreate creates a new file named name in the current directory. The file is opened with read + * and write access; if the #H5F_ACC_TRUNC flag is set, any pre-existing file of the same name in + * the same directory is truncated. If #H5F_ACC_TRUNC is not set or #H5F_ACC_EXCL is set and + * if a file of the same name exists, #H5Fcreate will fail. + * + * The new file is created with the properties specified in the property lists fcpl_id and fapl_id. + * fcpl is short for file creation property list. fapl is short for file access property list. Specifying + * #H5P_DEFAULT for either the creation or access property list will use the library’s default + * creation or access properties. + * + * If #H5Fcreate successfully creates the file, it returns a file identifier for the new file. This + * identifier will be used by the application any time an object identifier, an OID, for the file is + * required. Once the application has finished working with a file, the identifier should be released + * and the file closed with #H5Fclose. + * + * To open an existing file, call #H5Fopen: + * \code + * hid_t H5Fopen (const char *name, unsigned flags, hid_t fapl_id) + * \endcode + * + * #H5Fopen opens an existing file with read-write access if #H5F_ACC_RDWR is set and read-only + * access if #H5F_ACC_RDONLY is set. + * + * fapl_id is the file access property list identifier. 
Alternatively, #H5P_DEFAULT indicates that the + * application relies on the default I/O access parameters. Creating and changing access property + * lists is documented further below. + * + * A file can be opened more than once via multiple #H5Fopen calls. Each such call returns a unique + * file identifier and the file can be accessed through any of these file identifiers as long as they + * remain valid. Each of these file identifiers must be released by calling #H5Fclose when it is no + * longer needed. + * + * For more information, @see @ref subsubsec_file_property_lists_access. + * For more information, @see @ref subsec_file_property_lists. + * + * \subsection subsec_file_closes Closing an HDF5 File + * #H5Fclose both closes a file and releases the file identifier returned by #H5Fopen or #H5Fcreate. + * #H5Fclose must be called when an application is done working with a file; while the HDF5 + * Library makes every effort to maintain file integrity, failure to call #H5Fclose may result in the + * file being abandoned in an incomplete or corrupted state. + * + * To close a file, call #H5Fclose: + * \code + * herr_t H5Fclose (hid_t file_id) + * \endcode + * This function releases resources associated with an open file. After closing a file, the file + * identifier, file_id, cannot be used again as it will be undefined. + * + * #H5Fclose fulfills three purposes: to ensure that the file is left in an uncorrupted state, to ensure + * that all data has been written to the file, and to release resources. Use #H5Fflush if you wish to + * ensure that all data has been written to the file but it is premature to close it. + * + * Note regarding serial mode behavior: When #H5Fclose is called in serial mode, it closes the file + * and terminates new access to it, but it does not terminate access to objects that remain + * individually open within the file. 
That is, if #H5Fclose is called for a file but one or more objects + * within the file remain open, those objects will remain accessible until they are individually + * closed. To illustrate, assume that a file, fileA, contains a dataset, data_setA, and that both are + * open when #H5Fclose is called for fileA. data_setA will remain open and accessible, including + * writable, until it is explicitly closed. The file will be automatically and finally closed once all + * objects within it have been closed. + * + * Note regarding parallel mode behavior: Once #H5Fclose has been called in parallel mode, access + * is no longer available to any object within the file. + * + * \subsection subsec_file_property_lists File Property Lists + * Additional information regarding file structure and access are passed to #H5Fcreate and + * #H5Fopen through property list objects. Property lists provide a portable and extensible method of + * modifying file properties via simple API functions. There are two kinds of file-related property + * lists: + * \li File creation property lists + * \li File access property lists + * + * In the following sub-sections, we discuss only one file creation property, userblock size, in detail + * as a model for the user. Other file creation and file access properties are mentioned and defined + * briefly, but the model is not expanded for each; complete syntax, parameter, and usage + * information for every property list function is provided in the \ref H5P + * section of the HDF5 Reference Manual. + * + * For more information, @see @ref sec_plist. + * + * \subsubsection subsubsec_file_property_lists_create Creating a Property List + * If you do not wish to rely on the default file creation and access properties, you must first create + * a property list with #H5Pcreate. + * \code + * hid_t H5Pcreate (hid_t cls_id) + * \endcode + * cls_id is the type of property list being created. 
In this case, the appropriate values are + * #H5P_FILE_CREATE for a file creation property list and #H5P_FILE_ACCESS for a file access + * property list. + * + * Thus, the following calls create a file creation property list and a file access property list with + * identifiers fcpl_id and fapl_id, respectively: + * \code + * fcpl_id = H5Pcreate (H5P_FILE_CREATE) + * fapl_id = H5Pcreate (H5P_FILE_ACCESS) + * \endcode + * + * Once the property lists have been created, the properties themselves can be modified via the + * functions described in the following sub-sections. + * + * \subsubsection subsubsec_file_property_lists_props File Creation Properties + * File creation property lists control the file metadata, which is maintained in the superblock of the + * file. These properties are used only when a file is first created. + * + *

    Userblock Size

    + * \code + * herr_t H5Pset_userblock (hid_t plist, hsize_t size) + * herr_t H5Pget_userblock (hid_t plist, hsize_t *size) + * \endcode + * + * The userblock is a fixed-length block of data located at the beginning of the file and is ignored + * by the HDF5 library. This block is specifically set aside for any data or information that + * developers determine to be useful to their applications but that will not be used by the HDF5 + * library. The size of the userblock is defined in bytes and may be set to any power of two with a + * minimum size of 512 bytes. In other words, userblocks might be 512, 1024, or 2048 bytes in + * size. + * + * This property is set with #H5Pset_userblock and queried via #H5Pget_userblock. For example, if + * an application needed a 4K userblock, then the following function call could be used: + * \code + * status = H5Pset_userblock(fcpl_id, 4096) + * \endcode + * + * The property list could later be queried with: + * \code + * status = H5Pget_userblock(fcpl_id, size) + * \endcode + * and the value 4096 would be returned in the parameter size. + * + * Other properties, described below, are set and queried in exactly the same manner. Syntax and + * usage are detailed in the @ref H5P section of the HDF5 Reference Manual. + * + *

    Offset and Length Sizes

    + * This property specifies the number of bytes used to store the offset and length of objects in the + * HDF5 file. Values of 2, 4, and 8 bytes are currently supported to accommodate 16-bit, 32-bit, + * and 64-bit file address spaces. + * + * These properties are set and queried via #H5Pset_sizes and #H5Pget_sizes. + * + *

    Symbol Table Parameters

    + * The size of symbol table B-trees can be controlled by setting the 1/2-rank and 1/2-node size + * parameters of the B-tree. + * + * These properties are set and queried via #H5Pset_sym_k and #H5Pget_sym_k. + * + *

    Indexed Storage Parameters

    + * The size of indexed storage B-trees can be controlled by setting the 1/2-rank and 1/2-node size + * parameters of the B-tree. + * + * These properties are set and queried via #H5Pset_istore_k and #H5Pget_istore_k. + * + *

    Version Information

    + * Various objects in an HDF5 file may over time appear in different versions. The HDF5 Library + * keeps track of the version of each object in the file. + * + * Version information is retrieved via #H5Pget_version. + * + * \subsubsection subsubsec_file_property_lists_access File Access Properties + * This section discusses file access properties that are not related to the low-level file drivers. File + * drivers are discussed separately later in this chapter. + * For more information, @see @ref subsec_file_alternate_drivers. + * + * File access property lists control various aspects of file I/O and structure. + * + *

    Data Alignment

    + * Sometimes file access is faster if certain data elements are aligned in a specific manner. This can + * be controlled by setting alignment properties via the #H5Pset_alignment function. There are two + * values involved: + * \li A threshold value + * \li An alignment interval + * + * Any allocation request at least as large as the threshold will be aligned on an address that is a + * multiple of the alignment interval. + * + *

    Metadata Block Allocation Size

    + * Metadata typically exists as very small chunks of data; storing metadata elements in a file + * without blocking them can result in hundreds or thousands of very small data elements in the + * file. This can result in a highly fragmented file and seriously impede I/O. By blocking metadata + * elements, these small elements can be grouped in larger sets, thus alleviating both problems. + * + * #H5Pset_meta_block_size sets the minimum size in bytes of metadata block allocations. + * #H5Pget_meta_block_size retrieves the current minimum metadata block allocation size. + * + *

    Metadata Cache

    + * Metadata and raw data I/O speed are often governed by the size and frequency of disk reads and + * writes. In many cases, the speed can be substantially improved by the use of an appropriate + * cache. + * + * #H5Pset_cache sets the minimum cache size for both metadata and raw data and a preemption + * value for raw data chunks. #H5Pget_cache retrieves the current values. + * + *

    Data Sieve Buffer Size

    + * Data sieve buffering is used by certain file drivers to speed data I/O and is most commonly used when + * working with dataset hyperslabs. For example, using a buffer large enough to hold several pieces + * of a dataset as it is read in for hyperslab selections will boost performance noticeably. + * + * #H5Pset_sieve_buf_size sets the maximum size in bytes of the data sieve buffer. + * #H5Pget_sieve_buf_size retrieves the current maximum size of the data sieve buffer. + * + *

    Garbage Collection References

    + * Dataset region references and other reference types use space in an HDF5 file’s global heap. If + * garbage collection is on (1) and the user passes in an uninitialized value in a reference structure, + * the heap might become corrupted. When garbage collection is off (0), however, and the user reuses + * a reference, the previous heap block will be orphaned and not returned to the free heap + * space. When garbage collection is on, the user must initialize the reference structures to 0 or risk + * heap corruption. + * + * #H5Pset_gc_references sets the garbage collecting references flag. + * + * \subsection subsec_file_alternate_drivers Alternate File Storage Layouts and Low-level File Drivers + * The concept of an HDF5 file is actually rather abstract: the address space for what is normally + * thought of as an HDF5 file might correspond to any of the following: + * \li Single file on standard file system + * \li Multiple files on standard file system + * \li Multiple files on parallel file system + * \li Block of memory within application’s memory space + * \li More abstract situations such as virtual files + * + * This HDF5 address space is generally referred to as an HDF5 file regardless of its organization at + * the storage level. + * + * HDF5 employs an extremely flexible mechanism called the virtual file layer, or VFL, for file + * I/O. A full understanding of the VFL is only necessary if you plan to write your own drivers + * @see \ref VFL in the HDF5 Technical Notes. + * + * For our + * purposes here, it is sufficient to know that the low-level drivers used for file I/O reside in the + * VFL, as illustrated in the following figure. Note that H5FD_STREAM is not available with 1.8.x + * and later versions of the library. + * + * + * + * + * + *
    + * \image html VFL_Drivers.gif "I/O path from application to VFL and low-level drivers to storage" + *
    + * + * As mentioned above, HDF5 applications access HDF5 files through various low-level file + * drivers. The default driver for that layout is the POSIX driver (also known as the SEC2 driver), + * #H5FD_SEC2. Alternative layouts and drivers are designed to suit the needs of a variety of + * systems, environments, and applications. The drivers are listed in the table below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Supported file drivers
    Driver NameDriver IdentifierDescriptionRelated API
    POSIX#H5FD_SEC2This driver uses POSIX file-system functions like read and write to perform I/O to a single, + * permanent file on local disk with no system buffering. This driver is POSIX-compliant and is + * the default file driver for all systems.#H5Pset_fapl_sec2
    Direct#H5FD_DIRECTThis is the #H5FD_SEC2 driver except data is written to or read from the file + * synchronously without being cached by the system.#H5Pset_fapl_direct
    Log#H5FD_LOGThis is the #H5FD_SEC2 driver with logging capabilities.#H5Pset_fapl_log
    Windows#H5FD_WINDOWSThis driver was modified in HDF5-1.8.8 to be a wrapper of the POSIX driver, + * #H5FD_SEC2. This change should not affect user applications.#H5Pset_fapl_windows
    STDIO#H5FD_STDIOThis driver uses functions from the standard C stdio.h to perform I/O + * to a single, permanent file on local disk with additional system buffering.#H5Pset_fapl_stdio
    Memory#H5FD_COREWith this driver, an application can work with a file in memory for faster reads and + * writes. File contents are kept in memory until the file is closed. At closing, the memory + * version of the file can be written back to disk or abandoned.#H5Pset_fapl_core
    Family#H5FD_FAMILYWith this driver, the HDF5 file’s address space is partitioned into pieces and sent to + * separate storage files using an underlying driver of the user’s choice. This driver is for + * systems that do not support files larger than 2 gigabytes.#H5Pset_fapl_family
    Multi#H5FD_MULTIWith this driver, data can be stored in multiple files according to the type of the data. + * I/O might work better if data is stored in separate files based on the type of data. The Split + * driver is a special case of this driver.#H5Pset_fapl_multi
    SplitH5FD_SPLITThis file driver splits a file into two parts. One part stores metadata, and the other part + * stores raw data. This splitting a file into two parts is a limited case of the Multi driver.#H5Pset_fapl_split
    Parallel#H5FD_MPIOThis is the standard HDF5 file driver for parallel file systems. This driver uses the MPI + * standard for both communication and file I/O.#H5Pset_fapl_mpio
    Parallel POSIXH5FD_MPIPOSIXThis driver is no longer available.
    StreamH5FD_STREAMThis driver is no longer available.
    + * + * For more information, see the HDF5 Reference Manual entries for the function calls shown in + * the column on the right in the table above. + * + * Note that the low-level file drivers manage alternative file storage layouts. Dataset storage + * layouts (chunking, compression, and external dataset storage) are managed independently of file + * storage layouts. + * + * If an application requires a special-purpose low-level driver, the VFL provides a public API for + * creating one. For more information on how to create a driver, + * @see @ref VFL in the HDF5 Technical Notes. + * + * \subsubsection subsubsec_file_alternate_drivers_id Identifying the Previously‐used File Driver + * When creating a new HDF5 file, no history exists, so the file driver must be specified if it is to be + * other than the default. + * + * When opening existing files, however, the application may need to determine which low-level + * driver was used to create the file. The function #H5Pget_driver is used for this purpose. See the + * example below. + * + * Identifying a driver + * \code + * hid_t H5Pget_driver (hid_t fapl_id) + * \endcode + * + * #H5Pget_driver returns a constant identifying the low-level driver for the access property list + * fapl_id. For example, if the file was created with the POSIX (aka SEC2) driver, + * #H5Pget_driver returns #H5FD_SEC2. + * + * If the application opens an HDF5 file without both determining the driver used to create the file + * and setting up the use of that driver, the HDF5 Library will examine the superblock and the + * driver definition block to identify the driver. + * See the HDF5 File Format Specification + * for detailed descriptions of the superblock and the driver definition block. + * + * \subsubsection subsubsec_file_alternate_drivers_sec2 The POSIX (aka SEC2) Driver + * The POSIX driver, #H5FD_SEC2, uses functions from section 2 of the POSIX manual to access + * unbuffered files stored on a local file system. 
This driver is also known as the SEC2 driver. The + * HDF5 Library buffers metadata regardless of the low-level driver, but using this driver prevents + * data from being buffered again by the lowest layers of the library. + * + * The function #H5Pset_fapl_sec2 sets the file access properties to use the POSIX driver. See the + * example below. + * + * Using the POSIX, aka SEC2, driver + * \code + * herr_t H5Pset_fapl_sec2 (hid_t fapl_id) + * \endcode + * + * Any previously-defined driver properties are erased from the property list. + * + * Additional parameters may be added to this function in the future. Since there are no additional + * variable settings associated with the POSIX driver, there is no H5Pget_fapl_sec2 function. + * + * \subsubsection subsubsec_file_alternate_drivers_direct The Direct Driver + * The Direct driver, #H5FD_DIRECT, functions like the POSIX driver except that data is written to + * or read from the file synchronously without being cached by the system. + * + * The functions #H5Pset_fapl_direct and #H5Pget_fapl_direct are used to manage file access properties. + * See the example below. + * + * Using the Direct driver + * \code + * herr_t H5Pset_fapl_direct(hid_t fapl_id, size_t alignment, size_t block_size, size_t cbuf_size) + * herr_t H5Pget_fapl_direct(hid_t fapl_id, size_t *alignment, size_t *block_size, size_t *cbuf_size) + * \endcode + * + * #H5Pset_fapl_direct sets the file access properties to use the Direct driver; any previously defined + * driver properties are erased from the property list. #H5Pget_fapl_direct retrieves the file access + * properties used with the Direct driver. fapl_id is the file access property list identifier. + * alignment is the memory alignment boundary. block_size is the file system block size. + * cbuf_size is the copy buffer size. + * + * Additional parameters may be added to this function in the future. 
+ * + * \subsubsection subsubsec_file_alternate_drivers_log The Log Driver + * The Log driver, #H5FD_LOG, is designed for situations where it is necessary to log file access + * activity. + * + * The function #H5Pset_fapl_log is used to manage logging properties. See the example below. + * + * Logging file access + * \code + * herr_t H5Pset_fapl_log (hid_t fapl_id, const char *logfile, unsigned int flags, size_t buf_size) + * \endcode + * + * #H5Pset_fapl_log sets the file access property list to use the Log driver. File access characteristics + * are identical to access via the POSIX driver. Any previously defined driver properties are erased + * from the property list. + * + * Log records are written to the file logfile. + * + * The logging levels set with the verbosity parameter are shown in the table below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Logging levels
    LevelComments
    0Performs no logging.
    1Records where writes and reads occur in the file.
    2Records where writes and reads occur in the file and what kind of data is written + * at each location. This includes raw data or any of several types of metadata + * (object headers, superblock, B-tree data, local headers, or global headers).
    + * + * There is no H5Pget_fapl_log function. + * + * Additional parameters may be added to this function in the future. + * + * \subsubsection subsubsec_file_alternate_drivers_win The Windows Driver + * The Windows driver, #H5FD_WINDOWS, was modified in HDF5-1.8.8 to be a wrapper of the + * POSIX driver, #H5FD_SEC2. In other words, if the Windows drivers is used, any file I/O will + * instead use the functionality of the POSIX driver. This change should be transparent to all user + * applications. The Windows driver used to be the default driver for Windows systems. The + * POSIX driver is now the default. + * + * The function #H5Pset_fapl_windows sets the file access properties to use the Windows driver. + * See the example below. + * + * Using the Windows driver + * \code + * herr_t H5Pset_fapl_windows (hid_t fapl_id) + * \endcode + * + * Any previously-defined driver properties are erased from the property list. + * + * Additional parameters may be added to this function in the future. Since there are no additional + * variable settings associated with the POSIX driver, there is no H5Pget_fapl_windows function. + * + * \subsubsection subsubsec_file_alternate_drivers_stdio The STDIO Driver + * The STDIO driver, #H5FD_STDIO, accesses permanent files in a local file system like the + * POSIX driver does. The STDIO driver also has an additional layer of buffering beneath the + * HDF5 Library. + * + * The function #H5Pset_fapl_stdio sets the file access properties to use the STDIO driver. See the + * example below. + * + * Using the STDIO driver + * \code + * herr_t H5Pset_fapl_stdio (hid_t fapl_id) + * \endcode + * + * Any previously defined driver properties are erased from the property list. + * + * Additional parameters may be added to this function in the future. Since there are no additional + * variable settings associated with the STDIO driver, there is no H5Pget_fapl_stdio function. 
+ * + * \subsubsection subsubsec_file_alternate_drivers_mem The Memory (aka Core) Driver + * There are several situations in which it is reasonable, sometimes even required, to maintain a file + * entirely in system memory. You might want to do so if, for example, either of the following + * conditions apply: + *
    • Performance requirements are so stringent that disk latency is a limiting factor
    • + *
    • You are working with small, temporary files that will not be retained and, thus, + * need not be written to storage media
    + * + * The Memory driver, #H5FD_CORE, provides a mechanism for creating and managing such in memory files. + * The functions #H5Pset_fapl_core and #H5Pget_fapl_core manage file access + * properties. See the example below. + * + * Managing file access for in-memory files + * \code + * herr_t H5Pset_fapl_core (hid_t access_properties, size_t block_size, hbool_t backing_store) + * herr_t H5Pget_fapl_core (hid_t access_properties, size_t *block_size), hbool_t *backing_store) + * \endcode + * + * #H5Pset_fapl_core sets the file access property list to use the Memory driver; any previously + * defined driver properties are erased from the property list. + * + * Memory for the file will always be allocated in units of the specified block_size. + * + * The backing_store Boolean flag is set when the in-memory file is created. + * backing_store indicates whether to write the file contents to disk when the file is closed. If + * backing_store is set to 1 (TRUE), the file contents are flushed to a file with the same name as the + * in-memory file when the file is closed or access to the file is terminated in memory. If + * backing_store is set to 0 (FALSE), the file is not saved. + * + * The application is allowed to open an existing file with the #H5FD_CORE driver. While using + * #H5Fopen to open an existing file, if backing_store is set to 1 and the flag for #H5Fopen is set to + * #H5F_ACC_RDWR, changes to the file contents will be saved to the file when the file is closed. + * If backing_store is set to 0 and the flag for #H5Fopen is set to #H5F_ACC_RDWR, changes to the + * file contents will be lost when the file is closed. If the flag for #H5Fopen is set to + * #H5F_ACC_RDONLY, no change to the file will be allowed either in memory or on file. + * + * If the file access property list is set to use the Memory driver, #H5Pget_fapl_core will return + * block_size and backing_store with the relevant file access property settings. 
+ * + * Note the following important points regarding in-memory files: + *
    • Local temporary files are created and accessed directly from memory without ever + * being written to disk
    • + *
    • Total file size must not exceed the available virtual memory
    • + *
    • Only one HDF5 file identifier can be opened for the file, the identifier returned by + * #H5Fcreate or #H5Fopen
    • + *
    • The changes to the file will be discarded when access is terminated unless + * backing_store is set to 1
    + * + * Additional parameters may be added to these functions in the future. + * + * @see + * HDF5 File Image Operations + * section for information on more advanced usage of the Memory file driver, and + * @see + * Modified Region Writes + * section for information on how to set write operations so that only modified regions are written + * to storage. + * + * \subsubsection subsubsec_file_alternate_drivers_family The Family Driver + * HDF5 files can become quite large, and this can create problems on systems that do not support + * files larger than 2 gigabytes. The HDF5 file family mechanism is designed to solve the problems + * this creates by splitting the HDF5 file address space across several smaller files. This structure + * does not affect how metadata and raw data are stored: they are mixed in the address space just as + * they would be in a single, contiguous file. + * + * HDF5 applications access a family of files via the Family driver, #H5FD_FAMILY. The + * functions #H5Pset_fapl_family and #H5Pget_fapl_family are used to manage file family + * properties. See the example below. + * + * Managing file family properties + * \code + * herr_t H5Pset_fapl_family (hid_t fapl_id, + * hsize_t memb_size, hid_t member_properties) + * herr_t H5Pget_fapl_family (hid_t fapl_id, + * hsize_t *memb_size, hid_t *member_properties) + * \endcode + * + * Each member of the family is the same logical size though the size and disk storage reported by + * file system listing tools may be substantially smaller. Examples of file system listing tools are + * \code + * ls -l + * \endcode + * on a Unix system or the detailed folder listing on an Apple or Microsoft Windows + * system. The name passed to #H5Fcreate or #H5Fopen should include a printf(3c)-style integer + * format specifier which will be replaced with the family member number. The first family + * member is numbered zero (0). 
+ * + * #H5Pset_fapl_family sets the access properties to use the Family driver; any previously defined + * driver properties are erased from the property list. member_properties will serve as the file + * access property list for each member of the file family. memb_size specifies the logical size, in + * bytes, of each family member. memb_size is used only when creating a new file or truncating an + * existing file; otherwise the member size is determined by the size of the first member of the + * family being opened. Note: If the size of the off_t type is four bytes, the maximum family + * member size is usually 2^31-1 because the byte at offset 2,147,483,647 is generally inaccessible. + * + * #H5Pget_fapl_family is used to retrieve file family properties. If the file access property list is set + * to use the Family driver, member_properties will be returned with a pointer to a copy of the + * appropriate member access property list. If memb_size is non-null, it will contain the logical + * size, in bytes, of family members. + * + * Additional parameters may be added to these functions in the future. + * + *

    Unix Tools and an HDF5 Utility

    + * It occasionally becomes necessary to repartition a file family. A command-line utility for this + * purpose, h5repart, is distributed with the HDF5 library. + * + * \code + * h5repart [-v] [-b block_size[suffix]] [-m member_size[suffix]] source destination + * \endcode + * + * h5repart repartitions an HDF5 file by copying the source file or file family to the destination file + * or file family, preserving holes in the underlying UNIX files. Families are used for the source + * and/or destination if the name includes a printf-style integer format such as %d. The -v switch + * prints input and output file names on the standard error stream for progress monitoring, -b sets + * the I/O block size (the default is 1KB), and -m sets the output member size if the destination is a + * family name (the default is 1GB). block_size and member_size may be suffixed with the letters + * g, m, or k for GB, MB, or KB respectively. + * + * The h5repart utility is described on the Tools page of the HDF5 Reference Manual. + * + * An existing HDF5 file can be split into a family of files by running the file through split(1) on a + * UNIX system and numbering the output files. However, the HDF5 Library is lazy about + * extending the size of family members, so a valid file cannot generally be created by + * concatenation of the family members. + * + * Splitting the file and rejoining the segments by concatenation (split(1) and cat(1) on UNIX + * systems) does not generate files with holes; holes are preserved only through the use of h5repart. + * + * \subsubsection subsubsec_file_alternate_drivers_multi The Multi Driver + * In some circumstances, it is useful to separate metadata from raw data and some types of + * metadata from other types of metadata. Situations that would benefit from use of the Multi driver + * include the following: + *
    • In networked situations where the small metadata files can be kept on local disks but + * larger raw data files must be stored on remote media
    • + *
    • In cases where the raw data is extremely large
    • + *
    • In situations requiring frequent access to metadata held in RAM while the raw data + * can be efficiently held on disk
    + * + * In either case, access to the metadata is substantially easier with the smaller, and possibly more + * localized, metadata files. This often results in improved application performance. + * + * The Multi driver, #H5FD_MULTI, provides a mechanism for segregating raw data and different + * types of metadata into multiple files. The functions #H5Pset_fapl_multi and + * #H5Pget_fapl_multi are used to manage access properties for these multiple files. See the example + * below. + * + * Managing access properties for multiple files + * \code + * herr_t H5Pset_fapl_multi (hid_t fapl_id, const H5FD_mem_t *memb_map, const hid_t *memb_fapl, + * const char * const *memb_name, const haddr_t *memb_addr, + * hbool_t relax) + * herr_t H5Pget_fapl_multi (hid_t fapl_id, const H5FD_mem_t *memb_map, const hid_t *memb_fapl, + * const char **memb_name, const haddr_t *memb_addr, hbool_t *relax) + * \endcode + * + * #H5Pset_fapl_multi sets the file access properties to use the Multi driver; any previously defined + * driver properties are erased from the property list. With the Multi driver invoked, the application + * will provide a base name to #H5Fopen or #H5Fcreate. The files will be named by that base name as + * modified by the rule indicated in memb_name. File access will be governed by the file access + * property list memb_properties. + * + * See #H5Pset_fapl_multi and #H5Pget_fapl_multi in the HDF5 Reference Manual for descriptions + * of these functions and their usage. + * + * Additional parameters may be added to these functions in the future. + * + * \subsubsection subsubsec_file_alternate_drivers_split The Split Driver + * The Split driver, H5FD_SPLIT, is a limited case of the Multi driver where only two files are + * created. One file holds metadata, and the other file holds raw data. + * The function #H5Pset_fapl_split is used to manage Split file access properties. See the example + * below. 
+ * + * Managing access properties for split files + * \code + * herr_t H5Pset_fapl_split (hid_t access_properties, const char *meta_extension, + * hid_t meta_properties, const char *raw_extension, hid_t raw_properties) + * \endcode + * + * #H5Pset_fapl_split sets the file access properties to use the Split driver; any previously defined + * driver properties are erased from the property list. + * + * With the Split driver invoked, the application will provide a base file name such as file_name to + * #H5Fcreate or #H5Fopen. The metadata and raw data files in storage will then be named + * file_name.meta_extension and file_name.raw_extension, respectively. For example, if + * meta_extension is defined as .meta and raw_extension is defined as .raw, the final filenames will + * be file_name.meta and file_name.raw. + * + * Each file can have its own file access property list. This allows the creative use of other low-level + * file drivers. For instance, the metadata file can be held in RAM and accessed via the + * Memory driver while the raw data file is stored on disk and accessed via the POSIX driver. + * Metadata file access will be governed by the file access property list in meta_properties. Raw + * data file access will be governed by the file access property list in raw_properties. + * + * Additional parameters may be added to these functions in the future. Since there are no + * additional variable settings associated with the Split driver, there is no H5Pget_fapl_split + * function. + * + * \subsubsection subsubsec_file_alternate_drivers_par The Parallel Driver + * Parallel environments require a parallel low-level driver. HDF5’s default driver for parallel + * systems is called the Parallel driver, #H5FD_MPIO. This driver uses the MPI standard for both + * communication and file I/O. + * + * The functions #H5Pset_fapl_mpio and #H5Pget_fapl_mpio are used to manage file access + * properties for the #H5FD_MPIO driver. See the example below. 
+ * + * Managing parallel file access properties + * \code + * herr_t H5Pset_fapl_mpio (hid_t fapl_id, MPI_Comm comm, MPI_Info info) + * herr_t H5Pget_fapl_mpio (hid_t fapl_id, MPI_Comm *comm, MPI_Info *info) + * \endcode + * + * The file access properties managed by #H5Pset_fapl_mpio and retrieved by + * #H5Pget_fapl_mpio are the MPI communicator, comm, and the MPI info object, info. comm and + * info are used for file open. info is an information object much like an HDF5 property list. Both + * are defined in MPI_FILE_OPEN of MPI-2. + * + * The communicator and the info object are saved in the file access property list fapl_id. + * fapl_id can then be passed to MPI_FILE_OPEN to create and/or open the file. + * + * #H5Pset_fapl_mpio and #H5Pget_fapl_mpio are available only in the parallel HDF5 Library and + * are not collective functions. The Parallel driver is available only in the parallel HDF5 Library. + * + * Additional parameters may be added to these functions in the future. + * + * \subsection subsec_file_examples Code Examples for Opening and Closing Files + * \subsubsection subsubsec_file_examples_trunc Example Using the H5F_ACC_TRUNC Flag + * The following example uses the #H5F_ACC_TRUNC flag when it creates a new file. The default + * file creation and file access properties are also used. Using #H5F_ACC_TRUNC means the + * function will look for an existing file with the name specified by the function. In this case, that + * name is FILE. If the function does not find an existing file, it will create one. If it does find an + * existing file, it will empty the file in preparation for a new set of data. The identifier for the + * "new" file will be passed back to the application program. + * For more information, @see @ref subsec_file_access_modes. 
+ * + * Creating a file with default creation and access properties + * \code + * hid_t file; // identifier + * + * // Create a new file using H5F_ACC_TRUNC access, default + * // file creation properties, and default file access + * // properties. + * file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + * + * // Close the file. + * status = H5Fclose(file); + * \endcode + * + * \subsubsection subsubsec_file_examples_props Example with the File Creation Property List + * The example below shows how to create a file with 64-bit object offsets and lengths. + * + * Creating a file with 64-bit offsets + * \code + * hid_t create_plist; + * hid_t file_id; + * + * create_plist = H5Pcreate(H5P_FILE_CREATE); + * H5Pset_sizes(create_plist, 8, 8); + * file_id = H5Fcreate("test.h5", H5F_ACC_TRUNC, create_plist, H5P_DEFAULT); + * . + * . + * . + * + * H5Fclose(file_id); + * \endcode + * + * \subsubsection subsubsec_file_examples_access Example with the File Access Property List + * This example shows how to open an existing file for independent datasets access by MPI parallel + * I/O: + * + * Opening an existing file for parallel I/O + * \code + * hid_t access_plist; + * hid_t file_id; + * + * access_plist = H5Pcreate(H5P_FILE_ACCESS); + * H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); + * + * // H5Fopen must be called collectively + * file_id = H5Fopen("test.h5", H5F_ACC_RDWR, access_plist); + * . + * . + * . + * + * // H5Fclose must be called collectively + * H5Fclose(file_id); + * \endcode + * + * \subsection subsec_file_multiple Working with Multiple HDF5 Files + * Multiple HDF5 files can be associated so that the files can be worked with as though all the + * information is in a single HDF5 file. A temporary association can be set up by means of the + * #H5Fmount function. A permanent association can be set up by means of the external link + * function #H5Lcreate_external. 
+ * + * The purpose of this section is to describe what happens when the #H5Fmount function is used to + * mount one file on another. + * + * When a file is mounted on another, the mounted file is mounted at a group, and the root group of + * the mounted file takes the place of that group until the mounted file is unmounted or until the + * files are closed. + * + * The figure below shows two files before one is mounted on the other. File1 has two groups and + * three datasets. The group that is the target of the A link has links, Z and Y, to two of the datasets. + * The group that is the target of the B link has a link, W, to the other dataset. File2 has three + * groups and three datasets. The groups in File2 are the targets of the AA, BB, and CC links. The + * datasets in File2 are the targets of the ZZ, YY, and WW links. + * + * + * + * + * + *
    + * \image html Files_fig3.gif "Two separate files" + *
    + * + * The figure below shows the two files after File2 has been mounted on File1 at the group that is the + * target of the B link. + * + * + * + * + * + *
    + * \image html Files_fig4.gif "File2 mounted on File1" + *
    + * + * Note: In the figure above, the dataset that is the target of the W link is not shown. That dataset is + * masked by the mounted file. + * + * If a file is mounted on a group that has members, those members are hidden until the mounted + * file is unmounted. There are two ways around this if you need to work with a group member. + * One is to mount the file on an empty group. Another is to open the group member before you + * mount the file. Opening the group member will return an identifier that you can use to locate the + * group member. + * + * The example below shows how #H5Fmount might be used to mount File2 onto File1. + * + * Using H5Fmount + * \code + * status = H5Fmount(loc_id, "/B", child_id, plist_id) + * \endcode + * + * Note: In the code example above, loc_id is the file identifier for File1, /B is the link path to the + * group where File2 is mounted, child_id is the file identifier for File2, and plist_id is a property + * list identifier. + * For more information, @see @ref sec_group. + * + * See the entries for #H5Fmount, #H5Funmount, and #H5Lcreate_external in the HDF5 Reference Manual. + * + * Previous Chapter \ref sec_program - Next Chapter \ref sec_group + * + */ + +/** + * \defgroup H5F Files (H5F) * * Use the functions in this module to manage HDF5 files. * diff --git a/src/H5Gmodule.h b/src/H5Gmodule.h index 93e7184..defa5fa 100644 --- a/src/H5Gmodule.h +++ b/src/H5Gmodule.h @@ -28,7 +28,929 @@ #define H5_MY_PKG H5G #define H5_MY_PKG_ERR H5E_SYM -/** \defgroup H5G H5G +/** \page H5G_UG HDF5 Groups + * + * \section sec_group HDF5 Groups + * \subsection subsec_group_intro Introduction + * As suggested by the name Hierarchical Data Format, an HDF5 file is hierarchically structured. + * The HDF5 group and link objects implement this hierarchy. + * + * In the simple and most common case, the file structure is a tree structure; in the general case, the + * file structure may be a directed graph with a designated entry point. 
The tree structure is very + * similar to the file system structures employed on UNIX systems, directories and files, and on + * Apple and Microsoft Windows systems, folders and files. HDF5 groups are analogous + * to the directories and folders; HDF5 datasets are analogous to the files. + * + * The one very important difference between the HDF5 file structure and the above-mentioned file + * system analogs is that HDF5 groups are linked as a directed graph, allowing circular references; + * the file systems are strictly hierarchical, allowing no circular references. The figures below + * illustrate the range of possibilities. + * + * In the first figure below, the group structure is strictly hierarchical, identical to the file system + * analogs. + * + * In the next two figures below, the structure takes advantage of the directed graph’s allowance of + * circular references. In the second figure, GroupA is not only a member of the root group, /, but a + * member of GroupC. Since Group C is a member of Group B and Group B is a member of Group + * A, Dataset1 can be accessed by means of the circular reference /Group A/Group B/Group + * C/Group A/Dataset1. The third figure below illustrates an extreme case in which GroupB is a + * member of itself, enabling a reference to a member dataset such as /Group A/Group B/Group + * B/Group B/Dataset2. + * + * + * + * + * + *
    + * \image html Groups_fig1.gif "A file with a strictly hierarchical group structure" + *
    + * + * + * + * + * + *
    + * \image html Groups_fig2.gif "A file with a circular reference" + *
    + * + * + * + * + * + *
    + * \image html Groups_fig3.gif "A file with one group as a member of itself" + *
    + * + * As becomes apparent upon reflection, directed graph structures can become quite complex; + * caution is advised! + * + * The balance of this chapter discusses the following topics: + * \li The HDF5 group object (or a group) and its structure in more detail + * \li HDF5 link objects (or links) + * \li The programming model for working with groups and links + * \li HDF5 functions provided for working with groups, group members, and links + * \li Retrieving information about objects in a group + * \li Discovery of the structure of an HDF5 file and the contained objects + * \li Examples of file structures + * + * \subsection subsec_group_descr Description of the Group Object + * \subsubsection subsubsec_group_descr_object The Group Object + * Abstractly, an HDF5 group contains zero or more objects and every object must be a member of + * at least one group. The root group, the sole exception, may not belong to any group. + * + * + * + * + * + *
    + * \image html Groups_fig4.gif "Abstract model of the HDF5 group object" + *
    + * + * Group membership is actually implemented via link objects. See the figure above. A link object + * is owned by a group and points to a named object. Each link has a name, and each link points to + * exactly one object. Each named object has at least one and possibly many links to it. + * + * There are three classes of named objects: group, dataset, and committed datatype (formerly + * called named datatype). See the figure below. Each of these objects is the member of at least one + * group, which means there is at least one link to it. + * + * + * + * + * + *
    + * \image html Groups_fig5.gif "Classes of named objects" + *
    + * + * The primary operations on a group are to add and remove members and to discover member + * objects. These abstract operations, as listed in the figure below, are implemented in the \ref H5G + * APIs. For more information, @see @ref subsec_group_function. + * + * To add and delete members of a group, links from the group to existing objects in the file are + * created and deleted with the link and unlink operations. When a new named object is created, the + * HDF5 Library executes the link operation in the background immediately after creating the + * object (in other words, a new object is added as a member of the group in which it is created + * without further user intervention). + * + * Given the name of an object, the get_object_info method retrieves a description of the object, + * including the number of references to it. The iterate method iterates through the members of the + * group, returning the name and type of each object. + * + * + * + * + * + *
    + * \image html Groups_fig6.gif "The group object" + *
    + * + * Every HDF5 file has a single root group, with the name /. The root group is identical to any other + * HDF5 group, except: + * \li The root group is automatically created when the HDF5 file is created (#H5Fcreate). + * \li The root group has no parent, but by convention has a reference count of 1. + * \li The root group cannot be deleted (in other words, unlinked)! + * + * \subsubsection subsubsec_group_descr_model The Hierarchy of Data Objects + * An HDF5 file is organized as a rooted, directed graph using HDF5 group objects. The named + * data objects are the nodes of the graph, and the links are the directed arcs. Each arc of the graph + * has a name, with the special name / reserved for the root group. New objects are created and then + * inserted into the graph with a link operation that is automatically executed by the library; + * existing objects are inserted into the graph with a link operation explicitly called by the user, + * which creates a named link from a group to the object. + * + * An object can be the target of more than one link. + * + * The names on the links must be unique within each group, but there may be many links with the + * same name in different groups. These are unambiguous, because some ancestor must have a + * different name, or else they are the same object. The graph is navigated with path names, + * analogous to Unix file systems. For more information, @see @ref subsubsec_group_descr_path. + * + * An object can be opened with a full path starting at the root group, or with a relative path and a + * starting point. That starting point is always a group, though it may be the current working group, + * another specified group, or the root group of the file. Note that all paths are relative to a single + * HDF5 file. In this sense, an HDF5 file is analogous to a single UNIX file system. 
+ * + * It is important to note that, just like the UNIX file system, HDF5 objects do not have names, the + * names are associated with paths. An object has an object identifier that is unique within the file, + * but a single object may have many names because there may be many paths to the same object. + * An object can be renamed, or moved to another group, by adding and deleting links. In this case, + * the object itself never moves. For that matter, membership in a group has no implication for the + * physical location of the stored object. + * + * Deleting a link to an object does not necessarily delete the object. The object remains available + * as long as there is at least one link to it. After all links to an object are deleted, it can no longer + * be opened, and the storage may be reclaimed. + * + * It is also important to realize that the linking mechanism can be used to construct very complex + * graphs of objects. For example, it is possible for an object to be shared between several groups + * and even to have more than one name in the same group. It is also possible for a group to be a + * member of itself, or to create other cycles in the graph, such as in the case where a child group is + * linked to one of its ancestors. + * + * HDF5 also has soft links similar to UNIX soft links. A soft link is an object that has a name and + * a path name for the target object. The soft link can be followed to open the target of the link just + * like a regular or hard link. The differences are that the hard link cannot be created if the target + * object does not exist and it always points to the same object. A soft link can be created with any + * path name, whether or not the object exists; it may or may not, therefore, be possible to follow a + * soft link. Furthermore, a soft link’s target object may be changed. 
+ * + * \subsubsection subsubsec_group_descr_path HDF5 Path Names + * The structure of the HDF5 file constitutes the name space for the objects in the file. A path name + * is a string of components separated by slashes (/). Each component is the name of a hard or soft + * link which points to an object in the file. The slash not only separates the components, but + * indicates their hierarchical relationship; the component indicated by the link name following a + * slash is always a member of the component indicated by the link name preceding that slash. + * + * The first component in the path name may be any of the following: + * \li The special character dot (., a period), indicating the current group + * \li The special character slash (/), indicating the root group + * \li Any member of the current group + * + * Component link names may be any string of ASCII characters not containing a slash or a dot + * (/ and ., which are reserved as noted above). However, users are advised to avoid the use of + * punctuation and non-printing characters, as they may create problems for other software. The + * figure below provides a BNF grammar for HDF5 path names. + * + * A BNF grammar for HDF5 path names + * \code + * PathName ::= AbsolutePathName | RelativePathName + * Separator ::= "/" ["/"]* + * AbsolutePathName ::= Separator [ RelativePathName ] + * RelativePathName ::= Component [ Separator RelativePathName ]* + * Component ::= "." | Characters + * Characters ::= Character+ - { "." } + * Character ::= {c: c ∈ { legal ASCII characters } - {'/'} } + * \endcode + * + * An object can always be addressed by either a full or an absolute path name, starting at the root + * group, or by a relative path name, starting in a known location such as the current working + * group. As noted elsewhere, a given object may have multiple full and relative path names. + * + * Consider, for example, the file illustrated in the figure below. 
Dataset1 can be identified by either + * of these absolute path names: + * /GroupA/Dataset1 + * + * /GroupA/GroupB/GroupC/Dataset1 + * + * Since an HDF5 file is a directed graph structure, and is therefore not limited to a strict tree + * structure, and since this illustrated file includes the sort of circular reference that a directed graph + * enables, Dataset1 can also be identified by this absolute path name: + * /GroupA/GroupB/GroupC/GroupA/Dataset1 + * + * Alternatively, if the current working location is GroupB, Dataset1 can be identified by either of + * these relative path names: + * GroupC/Dataset1 + * + * GroupC/GroupA/Dataset1 + * + * Note that relative path names in HDF5 do not employ the ../ notation, the UNIX notation + * indicating a parent directory, to indicate a parent group. + * + * + * + * + * + *
    + * \image html Groups_fig2.gif "A file with a circular reference" + *
    + * + * \subsubsection subsubsec_group_descr_impl Group Implementations in HDF5 + * The original HDF5 group implementation provided a single indexed structure for link storage. A + * new group implementation, as of HDF5 Release 1.8.0, enables more efficient compact storage + * for very small groups, improved link indexing for large groups, and other advanced features. + *
      + *
    • The original indexed format remains the default. Links are stored in a B-tree in the + * group’s local heap.
    • + *
    • Groups created in the new compact-or-indexed format, the implementation introduced + * with Release 1.8.0, can be tuned for performance, switching between the compact and + * indexed formats at thresholds set in the user application. + *
        + *
      • The compact format will conserve file space and processing overhead when + * working with small groups and is particularly valuable when a group contains + * no links. Links are stored as a list of messages in the group’s header.
      • + *
      • The indexed format will yield improved performance when working with large + * groups. A large group may contain thousands to millions of members. Links + * are stored in a fractal heap and indexed with an improved B-tree.
      • + *
    • + *
    • The new implementation also enables the use of link names consisting of non-ASCII + * character sets (see #H5Pset_char_encoding) and is required for all link types other than + * hard or soft links; the link types other than hard or soft links are external links and + * user-defined links @see @ref H5L APIs.
    • + *
    + * + * The original group structure and the newer structures are not directly interoperable. By default, a + * group will be created in the original indexed format. An existing group can be changed to a + * compact-or-indexed format if the need arises; there is no capability to change back. As stated + * above, once in the compact-or-indexed format, a group can switch between compact and indexed + * as needed. + * + * Groups will be initially created in the compact-or-indexed format only when one or more of the + * following conditions is met: + *
      + *
    • The low version bound value of the library version bounds property has been set to + * Release 1.8.0 or later in the file access property list (see #H5Pset_libver_bounds). + * Currently, that would require an #H5Pset_libver_bounds call with the low parameter + * set to #H5F_LIBVER_LATEST. + * + * When this property is set for an HDF5 file, all objects in the file will be created using + * the latest available format; no effort will be made to create a file that can be read by + * older libraries.
    • + *
    • The creation order tracking property, #H5P_CRT_ORDER_TRACKED, has been set + * in the group creation property list (see #H5Pset_link_creation_order).
    • + *
    + * + * An existing group, currently in the original indexed format, will be converted to the compact-or- + * indexed format upon the occurrence of any of the following events: + *
      + *
    • An external or user-defined link is inserted into the group. + *
    • A link named with a string composed of non-ASCII characters is inserted into the + * group. + *
    + * + * The compact-or-indexed format offers performance improvements that will be most notable at + * the extremes (for example, in groups with zero members and in groups with tens of thousands of + * members). But measurable differences may sometimes appear at a threshold as low as eight + * group members. Since these performance thresholds and criteria differ from application to + * application, tunable settings are provided to govern the switch between the compact and indexed + * formats (see #H5Pset_link_phase_change). Optimal thresholds will depend on the application and + * the operating environment. + * + * Future versions of HDF5 will retain the ability to create, read, write, and manipulate all groups + * stored in either the original indexed format or the compact-or-indexed format. + * + * \subsection subsec_group_h5dump Using h5dump + * You can use h5dump, the command-line utility distributed with HDF5, to examine a file for + * purposes either of determining where to create an object within an HDF5 file or to verify that + * you have created an object in the intended place. + * + * In the case of the new group created later in this chapter, the following h5dump command will + * display the contents of FileA.h5: + * \code + * h5dump FileA.h5 + * \endcode + * + * For more information, @see @ref subsubsec_group_program_create. + * + * Assuming that the discussed objects, GroupA and GroupB are the only objects that exist in + * FileA.h5, the output will look something like the following: + * \code + * HDF5 "FileA.h5" { + * GROUP "/" { + * GROUP GroupA { + * GROUP GroupB { + * } + * } + * } + * } + * \endcode + * + * h5dump is described on the “HDF5 Tools” page of the \ref RM. + * + * The HDF5 DDL grammar is described in the @ref DDLBNF110. 
+ * + * \subsection subsec_group_function Group Function Summaries + * Functions that can be used with groups (\ref H5G functions) and property list functions that can used + * with groups (\ref H5P functions) are listed below. A number of group functions have been + * deprecated. Most of these have become link (\ref H5L) or object (\ref H5O) functions. These replacement + * functions are also listed below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Group functions
    FunctionPurpose
    #H5GcreateCreates a new empty group and gives it a name. The + * C function is a macro: \see \ref api-compat-macros.
    #H5Gcreate_anonCreates a new empty group without linking it into the file structure.
    #H5GopenOpens an existing group for modification and returns a group identifier for that group. + * The C function is a macro: \see \ref api-compat-macros.
    #H5GcloseCloses the specified group.
    #H5Gget_create_plistGets a group creation property list identifier.
    #H5Gget_infoRetrieves information about a group. Use instead of H5Gget_num_objs.
    #H5Gget_info_by_idxRetrieves information about a group according to the group’s position within an index.
    #H5Gget_info_by_nameRetrieves information about a group.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Link and object functions
    FunctionPurpose
    #H5Lcreate_hardCreates a hard link to an object. Replaces H5Glink and H5Glink2.
    #H5Lcreate_softCreates a soft link to an object. Replaces H5Glink and H5Glink2.
    #H5Lcreate_externalCreates a soft link to an object in a different file. Replaces H5Glink and H5Glink2.
    #H5Lcreate_udCreates a link of a user-defined type.
    #H5Lget_valReturns the value of a symbolic link. Replaces H5Gget_linkval.
    #H5LiterateIterates through links in a group. Replaces H5Giterate. + * See also #H5Ovisit and #H5Lvisit.
    #H5Literate_by_nameIterates through links in a group.
    #H5LvisitRecursively visits all links starting from a specified group.
    #H5OvisitRecursively visits all objects accessible from a specified object.
    #H5Lget_infoReturns information about a link. Replaces H5Gget_objinfo.
    #H5Oget_infoRetrieves the metadata for an object specified by an identifier. Replaces H5Gget_objinfo.
    #H5Lget_name_by_idxRetrieves name of the nth link in a group, according to the order within a specified field + * or index. Replaces H5Gget_objname_by_idx.
    #H5Oget_info_by_idxRetrieves the metadata for an object, identifying the object by an index position. Replaces + * H5Gget_objtype_by_idx.
    #H5Oget_info_by_nameRetrieves the metadata for an object, identifying the object by location and relative name.
    #H5Oset_commentSets the comment for specified object. Replaces H5Gset_comment.
    #H5Oget_commentGets the comment for specified object. Replaces H5Gget_comment.
    #H5LdeleteRemoves a link from a group. Replaces H5Gunlink.
    #H5LmoveRenames a link within an HDF5 file. Replaces H5Gmove and H5Gmove2.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Group creation property list functions
    FunctionPurpose
    #H5Pall_filters_availVerifies that all required filters are available.
    #H5Pget_filterReturns information about a filter in a pipeline. The + * C function is a macro: \see \ref api-compat-macros.
    #H5Pget_filter_by_idReturns information about the specified filter. The + * C function is a macro: \see \ref api-compat-macros.
    #H5Pget_nfiltersReturns the number of filters in the pipeline.
    #H5Pmodify_filterModifies a filter in the filter pipeline.
    #H5Premove_filterDeletes one or more filters in the filter pipeline.
    #H5Pset_deflateSets the deflate (GNU gzip) compression method and compression level.
    #H5Pset_filterAdds a filter to the filter pipeline.
    #H5Pset_fletcher32Sets up use of the Fletcher32 checksum filter.
    #H5Pset_link_phase_changeSets the parameters for conversion between compact and dense groups.
    #H5Pget_link_phase_changeQueries the settings for conversion between compact and dense groups.
    #H5Pset_est_link_infoSets estimated number of links and length of link names in a group.
    #H5Pget_est_link_infoQueries data required to estimate required local heap or object header size.
    #H5Pset_nlinksSets maximum number of soft or user-defined link traversals.
    #H5Pget_nlinksRetrieves the maximum number of link traversals.
    #H5Pset_link_creation_orderSets creation order tracking and indexing for links in a group.
    #H5Pget_link_creation_orderQueries whether link creation order is tracked and/or indexed in a group.
    #H5Pset_create_intermediate_groupSpecifies in the property list whether to create missing intermediate groups.
    #H5Pget_create_intermediate_groupDetermines whether the property is set to enable creating missing intermediate groups.
    #H5Pset_char_encodingSets the character encoding used to encode a string. Use to set ASCII or UTF-8 character + * encoding for object names.
    #H5Pget_char_encodingRetrieves the character encoding used to create a string.
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Other external link functions
    FunctionPurpose
    #H5Pset_elink_file_cache_sizeSets the size of the external link open file cache from the specified + * file access property list.
    #H5Pget_elink_file_cache_sizeRetrieves the size of the external link open file cache from the specified + * file access property list.
    #H5Fclear_elink_file_cacheClears the external link open file cache for a file.
    + * + * \subsection subsec_group_program Programming Model for Groups + * The programming model for working with groups is as follows: + *
    1. Create a new group or open an existing one.
    2. + *
    3. Perform the desired operations on the group. + *
      • Create new objects in the group.
      • + *
      • Insert existing objects as group members.
      • + *
      • Delete existing members.
      • + *
      • Open and close member objects.
      • + *
      • Access information regarding member objects.
      • + *
      • Iterate across group members.
      • + *
      • Manipulate links.
      + *
    4. Terminate access to the group (Close the group).
    + * + * \subsubsection subsubsec_group_program_create Creating a Group + * To create a group, use #H5Gcreate, specifying the location and the path of the new group. The + * location is the identifier of the file or the group in a file with respect to which the new group is to + * be identified. The path is a string that provides either an absolute path or a relative path to the + * new group. For more information, @see @ref subsubsec_group_descr_path. + * + * A path that begins with a slash (/) is + * an absolute path indicating that it locates the new group from the root group of the HDF5 file. A + * path that begins with any other character is a relative path. When the location is a file, a relative + * path is a path from that file’s root group; when the location is a group, a relative path is a path + * from that group. + * + * The sample code in the example below creates three groups. The group Data is created in the + * root directory; two groups are then created in /Data, one with absolute path, the other with a + * relative path. + * + * Creating three new groups + * \code + * hid_t file; + * file = H5Fopen(....); + * + * group = H5Gcreate(file, "/Data", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * group_new1 = H5Gcreate(file, "/Data/Data_new1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * group_new2 = H5Gcreate(group, "Data_new2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * The third #H5Gcreate parameter optionally specifies how much file space to reserve to store the + * names that will appear in this group. If a non-positive value is supplied, a default size is chosen. + * + * \subsubsection subsubsec_group_program_open Opening a Group and Accessing an Object in that Group + * Though it is not always necessary, it is often useful to explicitly open a group when working + * with objects in that group. 
Using the file created in the example above, the example below + * illustrates the use of a previously-acquired file identifier and a path relative to that file to open + * the group Data. + * + * Any object in a group can be also accessed by its absolute or relative path. To open an object + * using a relative path, an application must first open the group or file on which that relative path + * is based. To open an object using an absolute path, the application can use any location identifier + * in the same file as the target object; the file identifier is commonly used, but object identifier for + * any object in that file will work. Both of these approaches are illustrated in the example below. + * + * Using the file created in the examples above, the example below provides sample code + * illustrating the use of both relative and absolute paths to access an HDF5 data object. The first + * sequence (two function calls) uses a previously-acquired file identifier to open the group Data, + * and then uses the returned group identifier and a relative path to open the dataset CData. The + * second approach (one function call) uses the same previously-acquired file identifier and an + * absolute path to open the same dataset. + * + * Open a dataset with relative and absolute paths + * \code + * group = H5Gopen(file, "Data", H5P_DEFAULT); + * + * dataset1 = H5Dopen(group, "CData", H5P_DEFAULT); + * dataset2 = H5Dopen(file, "/Data/CData", H5P_DEFAULT); + * \endcode + * + * \subsubsection subsubsec_group_program_dataset Creating a Dataset in a Specific Group + * Any dataset must be created in a particular group. As with groups, a dataset may be created in a + * particular group by specifying its absolute path or a relative path. The example below illustrates + * both approaches to creating a dataset in the group /Data. 
+ * + * Create a dataset with absolute and relative paths + * \code + * dataspace = H5Screate_simple(RANK, dims, NULL); + * dataset1 = H5Dcreate(file, "/Data/CData", H5T_NATIVE_INT, dataspace, + * H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * group = H5Gopen(file, "Data", H5P_DEFAULT); + * dataset2 = H5Dcreate(group, "Cdata2", H5T_NATIVE_INT, dataspace, + * H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * + * \subsubsection subsubsec_group_program_close Closing a Group + * To ensure the integrity of HDF5 objects and to release system resources, an application should + * always call the appropriate close function when it is through working with an HDF5 object. In + * the case of groups, H5Gclose ends access to the group and releases any resources the HDF5 + * library has maintained in support of that access, including the group identifier. + * + * As illustrated in the example below, all that is required for an H5Gclose call is the group + * identifier acquired when the group was opened; there are no relative versus absolute path + * considerations. + * + * Close a group + * \code + * herr_t status; + * + * status = H5Gclose(group); + * \endcode + * + * A non-negative return value indicates that the group was successfully closed and the resources + * released; a negative return value indicates that the attempt to close the group or release resources + * failed. + * + * \subsubsection subsubsec_group_program_links Creating Links + * As previously mentioned, every object is created in a specific group. Once created, an object can + * be made a member of additional groups by means of links created with one of the H5Lcreate_* + * functions. + * + * A link is, in effect, a path by which the target object can be accessed; it therefore has a name + * which functions as a single path component. 
A link can be removed with an #H5Ldelete call, + * effectively removing the target object from the group that contained the link (assuming, of + * course, that the removed link was the only link to the target object in the group). + * + *

    Hard Links

    + * There are two kinds of links, hard links and symbolic links. Hard links are reference counted; + * symbolic links are not. When an object is created, a hard link is automatically created. An object + * can be deleted from the file by removing all the hard links to it. + * + * Working with the file from the previous examples, the code in the example below illustrates the + * creation of a hard link, named Data_link, in the root group, /, to the group Data. Once that link is + * created, the dataset CData can be accessed via either of two absolute paths, /Data/CData or + * /Data_link/CData. + * + * Create a hard link + * \code + * status = H5Lcreate_hard(Data_loc_id, "Data", DataLink_loc_id, "Data_link", H5P_DEFAULT, H5P_DEFAULT); + * + * dataset1 = H5Dopen(file, "/Data_link/CData", H5P_DEFAULT); + * dataset2 = H5Dopen(file, "/Data/CData", H5P_DEFAULT); + * \endcode + * + * The example below shows example code to delete a link, deleting the hard link Data from the + * root group. The group /Data and its members are still in the file, but they can no longer be + * accessed via a path using the component /Data. + * + * Delete a link + * \code + * status = H5Ldelete(Data_loc_id, "Data", H5P_DEFAULT); + * + * dataset1 = H5Dopen(file, "/Data_link/CData", H5P_DEFAULT); + * // This call should succeed; all path components still exist + * dataset2 = H5Dopen(file, "/Data/CData", H5P_DEFAULT); + * // This call will fail; the path component '/Data' has been deleted. + * \endcode + * + * When the last hard link to an object is deleted, the object is no longer accessible. #H5Ldelete will + * not prevent you from deleting the last link to an object. To see if an object has only one link, use + * the #H5Oget_info function. If the value of the rc (reference count) field in the #H5O_info_t struct is greater than 1, + * then the link can be deleted without making the object inaccessible. + * + * The example below shows the use of #H5Oget_info with the group originally called Data. 
+ * + * Finding the number of links to an object + * \code + * status = H5Oget_info(Data_loc_id, object_info); + * \endcode + * + * It is possible to delete the last hard link to an object and not make the object inaccessible. + * Suppose your application opens a dataset, and then deletes the last hard link to the dataset. While + * the dataset is open, your application still has a connection to the dataset. If your application + * creates a hard link to the dataset before it closes the dataset, then the dataset will still be + * accessible. + * + *

    Symbolic Links

    + * Symbolic links are objects that assign a name in a group to a path. Notably, the target object is + * determined only when the symbolic link is accessed, and may, in fact, not exist. Symbolic links + * are not reference counted, so there may be zero, one, or more symbolic links to an object. + * + * The major types of symbolic links are soft links and external links. Soft links are symbolic links + * within an HDF5 file and are created with the #H5Lcreate_soft function. Symbolic links to objects + * located in external files, in other words external links, can be created with the + * #H5Lcreate_external function. Symbolic links are removed with the #H5Ldelete function. + * + * The example below shows the creation of two soft links to the group /Data. + * + * Create a soft link + * \code + * status = H5Lcreate_soft(path_to_target, link_loc_id, "Soft2", H5P_DEFAULT, H5P_DEFAULT); + * status = H5Lcreate_soft(path_to_target, link_loc_id, "Soft3", H5P_DEFAULT, H5P_DEFAULT); + * dataset = H5Dopen(file, "/Soft2/CData", H5P_DEFAULT); + * \endcode + * + * With the soft links defined in the example above, the dataset CData in the group /Data can now + * be opened with any of the names /Data/CData, /Soft2/CData, or /Soft3/CData. + * + * In release 1.8.7, a cache was added to hold the names of files accessed via external links. The + * size of this cache can be changed to help improve performance. For more information, see the + * entry in the \ref RM for the #H5Pset_elink_file_cache_size function call. + * + *

    Note Regarding Hard Links and Soft Links

    + * Note that an object’s existence in a file is governed by the presence of at least one hard link to + * that object. If the last hard link to an object is removed, the object is removed from the file and + * any remaining soft link becomes a dangling link, a link whose target object does not exist. + * + *

    Moving or Renaming Objects, and a Warning

    + * An object can be renamed by changing the name of a link to it with #H5Lmove. This has the same + * effect as creating a new link with the new name and deleting the link with the old name. + * + * Exercise caution in the use of #H5Lmove and #H5Ldelete as these functions each include a step + * that unlinks a pointer to an HDF5 object. If the link that is removed is on the only path leading to + * an HDF5 object, that object will become permanently inaccessible in the file. + * + *
    Scenario 1: Removing the Last Link
    + * To avoid removing the last link to an object or otherwise making an object inaccessible, use the + * #H5Oget_info function. Make sure that the value of the reference count field (rc) is greater than 1. + * + *
    Scenario 2: Moving a Link that Isolates an Object
    + * Consider the following example: assume that the group group2 can only be accessed via the + * following path, where top_group is a member of the file’s root group: + * /top_group/group1/group2/ + * + * Using #H5Lmove, top_group is renamed to be a member of group2. At this point, since + * top_group was the only route from the root group to group1, there is no longer a path by which + * one can access group1, group2, or any member datasets. And since top_group is now a member + * of group2, top_group itself and any member datasets have thereby also become inaccessible. + * + *

    Mounting a File

    + * An external link is a permanent connection between two files. A temporary connection can be set + * up with the #H5Fmount function. For more information, @see sec_file. + * For more information, see the #H5Fmount function in the \ref RM. + * + * \subsubsection subsubsec_group_program_info Discovering Information about Objects + * There is often a need to retrieve information about a particular object. The #H5Lget_info and + * #H5Oget_info functions fill this niche by returning a description of the object or link in an + * #H5L_info_t or #H5O_info_t structure. + * + * \subsubsection subsubsec_group_program_objs Discovering Objects in a Group + * To examine all the objects or links in a group, use the #H5Literate or #H5Ovisit functions to + * examine the objects, and use the #H5Lvisit function to examine the links. #H5Literate is useful + * both with a single group and in an iterative process that examines an entire file or section of a + * file (such as the contents of a group or the contents of all the groups that are members of that + * group) and acts on objects as they are encountered. #H5Ovisit recursively visits all objects + * accessible from a specified object. #H5Lvisit recursively visits all the links starting from a + * specified group. + * + * \subsubsection subsubsec_group_program_all Discovering All of the Objects in the File + * The structure of an HDF5 file is self-describing, meaning that an application can navigate an + * HDF5 file to discover and understand all the objects it contains. This is an iterative process + * wherein the structure is traversed as a graph, starting at one node and recursively visiting linked + * nodes. To explore the entire file, the traversal should start at the root group. + * + * \subsection subsec_group_examples Examples of File Structures + * This section presents several samples of HDF5 file structures. + * + * Figure 9 shows examples of the structure of a file with three groups and one dataset. 
The file in + * part a contains three groups: the root group and two member groups. In part b, the dataset + * dset1 has been created in /group1. In part c, a link named dset2 from /group2 to the dataset has + * been added. Note that there is only one copy of the dataset; there are two links to it and it can be + * accessed either as /group1/dset1 or as /group2/dset2. + * + * Part d illustrates that one of the two links to the dataset can be deleted. In this case, the link from + * /group1 + * has been removed. The dataset itself has not been deleted; it is still in the file but can only be + * accessed as + * /group2/dset2 + * + * + * + * + * + * + * + * + * + * + * + *
    Figure 9 - Some file structures
    + * \image html Groups_fig9_a.gif "a) The file contains three groups: the root group, /group1, and /group2." + * + * \image html Groups_fig9_b.gif "b) The dataset dset1 (or /group1/dset1) is created in /group1." + *
    + * \image html Groups_fig9_aa.gif "c) A link named dset2 to the same dataset is created in /group2." + * + * \image html Groups_fig9_bb.gif "d) The link from /group1 to dset1 is removed. The dataset is + * still in the file, but can be accessed only as /group2/dset2." + *
    + * + * Figure 10 illustrates loops in an HDF5 file structure. The file in part a contains three groups + * and a dataset; group2 is a member of the root group and of the root group’s other member group, + * group1. group2 thus can be accessed by either of two paths: /group2 or /group1/GXX. Similarly, + * the dataset can be accessed either as /group2/dset1 or as /group1/GXX/dset1. + * + * Part b illustrates a different case: the dataset is a member of a single group but with two links, or + * names, in that group. In this case, the dataset again has two names, /group1/dset1 and + * /group1/dset2. + * + * In part c, the dataset dset1 is a member of two groups, one of which can be accessed by either of + * two names. The dataset thus has three path names: /group1/dset1, /group2/dset2, and + * /group1/GXX/dset2. + * + * And in part d, two of the groups are members of each other and the dataset is a member of both + * groups. In this case, there are an infinite number of paths to the dataset because GXX and + * GYY can be traversed any number of times on the way from the root group, /, to the dataset. This + * can yield a path name such as /group1/GXX/GYY/GXX/GYY/GXX/dset2. + * + * + * + * + * + * + * + * + * + * + * + *
    Figure 10 - More sample file structures
    + * \image html Groups_fig10_a.gif "a) dset1 has two names: /group2/dset1 and /group1/GXX/dset1." + * + * \image html Groups_fig10_b.gif "b) dset1 again has two names: /group1/dset1 and /group1/dset2." + *
    + * \image html Groups_fig10_c.gif "c) dset1 has three names: /group1/dset1, /group2/dset2, and + * /group1/GXX/dset2." + * + * \image html Groups_fig10_d.gif "d) dset1 has an infinite number of available path names." + *
    + * + * Figure 11 takes us into the realm of soft links. The original file, in part a, contains only three + * hard links. In part b, a soft link named dset2 from group2 to /group1/dset1 has been created, + * making this dataset accessible as /group2/dset2. + * + * In part c, another soft link has been created in group2. But this time the soft link, dset3, points + * to a target object that does not yet exist. That target object, dset, has been added in part d and is + * now accessible as either /group2/dset or /group2/dset3. + * + * It could be said that HDF5 extends the organizing concepts of a file system to the internal + * structure of a single file. + * + * + * + * + * + * + * + * + * + * + * + *
    Figure 11 - Hard and soft links
    + * \image html Groups_fig11_a.gif "a) The file contains only hard links." + * + * \image html Groups_fig11_b.gif "b) A soft link is added from group2 to /group1/dset1." + *
    + * \image html Groups_fig11_c.gif "c) A soft link named dset3 is added with a target that does not yet exist." + * + * \image html Groups_fig11_d.gif "d) The target of the soft link is created or linked." + *
    + * + * Previous Chapter \ref sec_file - Next Chapter \ref sec_dataset + * + */ + +/** + * \defgroup H5G Groups (H5G) * * Use the functions in this module to manage HDF5 groups. * diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index ce36b84..c659a83 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -175,7 +175,7 @@ H5_DLL hid_t H5Gcreate_async(const char *app_file, const char *app_func, unsigne * H5Gclose() when the group is no longer needed so that resource * leaks will not develop. * - * \see H5Olink(), H5Dcreate(), Using Identifiers + * \see H5Olink(), H5Gcreate() * * \since 1.8.0 * @@ -735,7 +735,7 @@ H5_DLL herr_t H5Glink2(hid_t cur_loc_id, const char *cur_name, H5G_link_t type, * * \attention Exercise care in moving groups as it is possible to render data in * a file inaccessible with H5Gmove(). See The Group Interface in the - * HDF5 User's Guide. + * \ref UG. * * \version 1.8.0 Function deprecated in this release. * @@ -766,7 +766,7 @@ H5_DLL herr_t H5Gmove(hid_t src_loc_id, const char *src_name, const char *dst_na * * \attention Exercise care in moving groups as it is possible to render data in * a file inaccessible with H5Gmove2(). See The Group Interface in the - * HDF5 User's Guide. + * \ref UG. * * \version 1.8.0 Function deprecated in this release. * @@ -803,11 +803,11 @@ H5_DLL herr_t H5Gmove2(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, * Note that space identified as freespace is available for re-use only * as long as the file remains open; once a file has been closed, the * HDF5 library loses track of freespace. See “Freespace Management” in - * the HDF5 User's Guide for further details. + * the \ref UG for further details. * * \attention Exercise care in moving groups as it is possible to render data in * a file inaccessible with H5Gunlink(). See The Group Interface in the - * HDF5 User's Guide. + * \ref UG. * * \version 1.8.0 Function deprecated in this release. 
* diff --git a/src/H5Imodule.h b/src/H5Imodule.h index cd1cbcd..9470cc9 100644 --- a/src/H5Imodule.h +++ b/src/H5Imodule.h @@ -28,7 +28,12 @@ #define H5_MY_PKG H5I #define H5_MY_PKG_ERR H5E_ID -/**\defgroup H5I H5I +/** \page H5I_UG The HDF5 Identifiers + * @todo Under Construction + */ + +/** + * \defgroup H5I Identifiers (H5I) * * Use the functions in this module to manage identifiers defined by the HDF5 * library. See \ref H5IUD for user-defined identifiers and identifier diff --git a/src/H5Lmodule.h b/src/H5Lmodule.h index d52690e..cbb5060 100644 --- a/src/H5Lmodule.h +++ b/src/H5Lmodule.h @@ -28,7 +28,12 @@ #define H5_MY_PKG H5L #define H5_MY_PKG_ERR H5E_LINK -/**\defgroup H5L H5L +/** \page H5L_UG The HDF5 Links + * @todo Under Construction + */ + +/** + * \defgroup H5L Links (H5L) * * Use the functions in this module to manage HDF5 links and link types. * diff --git a/src/H5Mmodule.h b/src/H5Mmodule.h index e8d7c89..920ec3d 100644 --- a/src/H5Mmodule.h +++ b/src/H5Mmodule.h @@ -25,10 +25,24 @@ #define H5_MY_PKG H5M #define H5_MY_PKG_ERR H5E_MAP -/**\defgroup H5M H5M +/** + * \page H5M_UG The HDF5 VOL Data Mapping + * \Bold{The HDF5 Data Mapping can only be used with the HDF5 VOL connectors that + * implement map objects.} The native HDF5 library does not support this feature. + * + * \section sec_map The HDF5 Map Object * * \todo Describe the map life cycle. * + * \todo How does MAPL fit into \ref subsubsec_plist_class. + * + * Previous Chapter \ref sec_async - Next Chapter \ref sec_addition + * + */ + +/** + * \defgroup H5M VOL Mapping (H5M) + * * \details \Bold{The interface can only be used with the HDF5 VOL connectors that * implement map objects.} The native HDF5 library does not support this * feature. 
diff --git a/src/H5Omodule.h b/src/H5Omodule.h index 18e329c..afb005b 100644 --- a/src/H5Omodule.h +++ b/src/H5Omodule.h @@ -28,7 +28,12 @@ #define H5_MY_PKG H5O #define H5_MY_PKG_ERR H5E_OHDR -/**\defgroup H5O H5O +/** \page H5O_UG The HDF5 Objects + * @todo Under Construction + */ + +/** + * \defgroup H5O Objects (H5O) * * Use the functions in this module to manage HDF5 objects. * diff --git a/src/H5Opublic.h b/src/H5Opublic.h index ba352c8..a6cea39 100644 --- a/src/H5Opublic.h +++ b/src/H5Opublic.h @@ -311,7 +311,7 @@ H5_DLL hid_t H5Oopen_by_token(hid_t loc_id, H5O_token_t token); * * \return \hid_tv{object} * - * \details H5Open_by_idx() opens the nth object in the group specified by \p loc_id + * \details H5Oopen_by_idx() opens the nth object in the group specified by \p loc_id * and \p group_name. * * \p loc_id specifies a location identifier. @@ -778,7 +778,7 @@ H5_DLL herr_t H5Olink(hid_t obj_id, hid_t new_loc_id, const char *new_name, hid_ * * An object’s reference count is the number of hard links in the * file that point to that object. See the “Programming Model” - * section of the HDF5 Groups chapter in the -- HDF5 User’s Guide + * section of the HDF5 Groups chapter in the -- \ref UG * for a more complete discussion of reference counts. * * If a user application needs to determine an object’s reference @@ -813,7 +813,7 @@ H5_DLL herr_t H5Oincr_refcount(hid_t object_id); * * An object’s reference count is the number of hard links in the * file that point to that object. See the “Programming Model” - * section of the HDF5 Groups chapter in the HDF5 User’s Guide + * section of the HDF5 Groups chapter in the \ref UG * for a more complete discussion of reference counts. * * If a user application needs to determine an object’s reference diff --git a/src/H5PLmodule.h b/src/H5PLmodule.h index 4751a48..9331c86 100644 --- a/src/H5PLmodule.h +++ b/src/H5PLmodule.h @@ -2,7 +2,7 @@ * Copyright by The HDF Group. * * All rights reserved. 
* * * - * This file is part of HDF5. The full HDF5 copyright notice, including * + * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * * distribution tree, or in https://www.hdfgroup.org/licenses. * @@ -26,7 +26,12 @@ #define H5_MY_PKG H5PL #define H5_MY_PKG_ERR H5E_PLUGIN -/**\defgroup H5PL H5PL +/** \page H5PL_UG The HDF5 Plugins + * @todo Under Construction + */ + +/** + * \defgroup H5PL Dynamically-loaded Plugins (H5PL) * * Use the functions in this module to manage the loading behavior of HDF5 * plugins. diff --git a/src/H5Pmodule.h b/src/H5Pmodule.h index d771e6e..d5ef982 100644 --- a/src/H5Pmodule.h +++ b/src/H5Pmodule.h @@ -28,7 +28,860 @@ #define H5_MY_PKG H5P #define H5_MY_PKG_ERR H5E_PLIST -/**\defgroup H5P H5P +/** \page H5P_UG Properties and Property Lists in HDF5 + * + * \section sec_plist Properties and Property Lists in HDF5 + * + * HDF5 property lists are the main vehicle to configure the + * behavior of HDF5 API functions. + * + * Typically, property lists are created by instantiating one of the built-in + * or user-defined property list classes. After adding suitable properties, + * property lists are used when opening or creating HDF5 items, or when reading + * or writing data. Property lists can be modified by adding or changing + * properties. Property lists are deleted by closing the associated handles. + * + * \subsection subsec_plist_intro Introduction + * + * HDF5 properties and property lists make it possible to shape or modify an HDF5 file, group, + * dataset, attribute, committed datatype, or even an I/O stream, in a number of ways. For example, + * you can do any of the following: + * \li Customize the storage layout of a file to suit a project or task. + * \li Create a chunked dataset. + * \li Apply compression or filters to raw data. 
+ * \li Use either ASCII or UTF-8 character encodings. + * \li Create missing groups on the fly. + * \li Switch between serial and parallel I/O. + * \li Create consistency within a single file or across an international project. + * + * Some properties enable an HDF5 application to take advantage of the capabilities of a specific + * computing environment while others make a file more compact; some speed the reading or + * writing of data while others enable more record-keeping at a per-object level. HDF5 offers + * nearly one hundred specific properties that can be used in literally thousands of combinations to + * maximize the usability of HDF5-stored data. + * + * At the most basic level, a property list is a collection of properties, represented by name/value + * pairs that can be passed to various HDF5 functions, usually modifying default settings. A + * property list inherits a set of properties and values from a property list class. But that statement + * hardly provides a complete picture; in the rest of this section and in the next section, + * \ref subsec_plist_class , we will discuss these things in much more detail. + * After reading that material, the reader should have a reasonably complete understanding of how + * properties and property lists can be used in HDF5 applications. + * + * + * + * + * + *
    + * \image html PropListEcosystem.gif "The HDF5 property environment" + *
    + * + * The remaining sections in this chapter discuss the following topics: + * \li What are properties, property lists, and property list classes? + * \li Property list programming model + * \li Generic property functions + * \li Summary listings of property list functions + * \li Additional resources + * + * The discussions and function listings in this chapter focus on general property operations, object + * and link properties, and related functions. + * + * File, group, dataset, datatype, and attribute properties are discussed in the chapters devoted to + * those features, where that information will be most convenient to users. For example, \ref sec_dataset + * discusses dataset creation property lists and functions, dataset access property lists and + * functions, and dataset transfer property lists and functions. This chapter does not duplicate those + * discussions. + * + * Generic property operations are an advanced feature and are beyond the scope of this guide. + * + * This chapter assumes an understanding of the following chapters of this \ref UG + * \li \ref sec_data_model + * \li \ref sec_program + * + * \subsection subsec_plist_class Property List Classes, Property Lists, and Properties + * + * HDF5 property lists and the property list interface \ref H5P provide a mechanism for storing + * characteristics of objects in an HDF5 file and economically passing them around in an HDF5 + * application. In this capacity, property lists significantly reduce the burden of additional function + * parameters throughout the HDF5 API. Another advantage of property lists is that features can + * often be added to HDF5 by adding only property list functions to the API; this is particularly true + * when all other requirements of the feature can be accomplished internally to the library. + * + * For instance, a file creation operation needs to know several things about a file, such as the size + * of the userblock or the sizes of various file data structures. 
Bundling this information as a + * property list simplifies the interface by reducing the number of parameters to the function + * \ref H5Fcreate. + * + * As illustrated in the figure above ("The HDF5 property environment"), the HDF5 property + * environment is a three-level hierarchy: + * \li Property list classes + * \li Property lists + * \li Properties + * + * The following subsections discuss property list classes, property lists, and properties in more detail. + * + * \subsubsection subsubsec_plist_class Property List Classes + * + * A property list class defines the roles that property lists of that class can play. Each class includes + * all properties that are valid for that class with each property set to its default value. HDF5 offers + * a property lists class for each of the following situations. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Property list classes in HDF5
    Property List ClassFor further discussion
    + * File creation (FCPL) + * + * \ref H5P_FILE_CREATE + * + * See various sections of \ref sec_file + *
    + * File access (FAPL) + * + * \ref H5P_FILE_ACCESS + * + * Used only as \ref H5P_DEFAULT. + *
    + * File mount (FMPL) + * + * \ref H5P_FILE_MOUNT + * + * For more information, see \ref FileMountProps "File Mount Properties" + *
    + * Object creation (OCPL) + * + * \ref H5P_OBJECT_CREATE + * + * See \ref OCPL + *
    + * Object copy (OCPYPL) + * + * \ref H5P_OBJECT_COPY + * + * + *
    + * Group creation (GCPL) + * + * \ref H5P_GROUP_CREATE + * + * See \ref subsec_group_program + *
    + * Group access (GAPL) + * + * \ref H5P_GROUP_ACCESS + * + * + *
    + * Link creation (LCPL) + * + * \ref H5P_LINK_CREATE + * + * See examples in \ref subsec_plist_program and \ref LCPL + *
    + * Link access (LAPL) + * + * \ref H5P_LINK_ACCESS + * + * + *
    + * Dataset creation (DCPL) + * + * \ref H5P_DATASET_CREATE + * + * See \ref subsec_dataset_program + *
    + * Dataset access (DAPL) + * + * \ref H5P_DATASET_ACCESS + * + * + *
    + * Dataset transfer (DXPL) + * + * \ref H5P_DATASET_XFER + * + * + *
    + * Datatype creation (TCPL) + * + * \ref H5P_DATATYPE_CREATE + * + * See various sections of \ref sec_datatype + *
    + * String creation (STRCPL) + * + * \ref H5P_STRING_CREATE + * + * See \ref subsec_dataset_program and \ref subsec_datatype_program + *
    + * Attribute creation (ACPL) + * + * \ref H5P_ATTRIBUTE_CREATE + * + * See \ref subsec_attribute_work. + *
    + * + * Note: In the table above, the abbreviations to the right of each property list class name in this + * table are widely used in both HDF5 programmer documentation and HDF5 source code. For + * example, \ref FCPL (FCPL) is the file creation property list, \ref OCPL (OCPL) is the object creation + * property list, \ref OCPYPL (OCPYPL) is object copy property list, and \ref STRCPL (STRCPL) is the string + * creation property list. These abbreviations may appear in either uppercase or lowercase. + * + * The “HDF5 property list class inheritance hierarchy” figure, immediately following, illustrates + * the inheritance hierarchy of HDF5’s property list classes. Properties are defined at the root of the + * HDF5 property environment (\ref PLCR in the figure below). Property list + * classes then inherit properties from that root, either directly or indirectly through a parent class. + * In every case, a property list class inherits only the properties relevant to its role. For example, + * the \ref OCPL (OCPL) inherits all properties that are relevant to the + * creation of any object while the \ref GCPL (GCPL) inherits only those + * properties that are relevant to group creation. + * + * + * + * + * + *
    + * \image html PropListClassInheritance.gif "HDF5 property list class inheritance hierarchy" + *
    + * Note: In the figure above, property list classes displayed in black are directly accessible through + * the programming interface; the root of the property environment and the \ref STRCPL and \ref OCPL + * property list classes, in gray above, are not user-accessible. The red empty set symbol indicates + * that the \ref FMPL (FMPL) is an empty class; that is, it has no settable + * properties. For more information, see \ref FileMountProps "File Mount Properties". Abbreviations + * used in this figure are defined in the preceding table, \ref table_plist "Property list classes in HDF5". + * + * \subsubsection subsubsec_plist_lists Property Lists + * + * A property list is a collection of related properties that are used together in specific + * circumstances. A new property list created from a property list class inherits the properties of the + * property list class and each property’s default value. A fresh dataset creation property list, for + * example, includes all of the HDF5 properties relevant to the creation of a new dataset. + * + * Property lists are implemented as containers holding a collection of name/value pairs. Each pair + * specifies a property name and a value for the property. A property list usually contains + * information for one to many properties. + * + * HDF5’s default property values are designed to be reasonable for general use cases. Therefore, + * an application can often use a property list without modification. On the other hand, adjusting + * property list settings is a routine action and there are many reasons for an application to do so. + * + * A new property list may either be derived from a property list class or copied from an existing + * property list. When a property list is created from a property list class, it contains all the + * properties that are relevant to the class, with each property set to its default value. 
A new + * property list created by copying an existing property list will contain the same properties and + * property values as the original property list. In either case, the property values can be changed as + * needed through the HDF5 API. + * + * Property lists can be freely reused to create consistency. For example, a single set of file, group, + * and dataset creation property lists might be created at the beginning of a project and used to + * create hundreds, thousands, even millions, of consistent files, file structures, and datasets over + * the project’s life. When such consistency is important to a project, this is an economical means + * of providing it. + * + * \subsubsection subsubsec_plist_props Properties + * + * A property is the basic element of the property list hierarchy. HDF5 offers nearly one hundred + * properties controlling things ranging from file access rights, to the storage layout of a dataset, + * through optimizing the use of a parallel computing environment. + * + * Further examples include the following: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    PurposeExamplesProperty List
    + * Specify the driver to be used to open a file + * + * A POSIX driver or an MPI IO driver + * + * \ref FAPL + *
    + * Specify filters to be applied to a dataset + * + * Gzip compression or checksum evaluation + * + * \ref DCPL + *
    + * Specify whether to record key times associated with an object + * + * Creation time and/or last-modified time + * + * \ref OCPL + *
    + * Specify the access mode for a file opened via an external link + * + * Read-only or read-write + * + * \ref LAPL + *
    + * + * Each property is initialized with a default value. For each property, there are one or more + * dedicated H5Pset_* calls that can be used to change that value. + * + *

    Creation, access, and transfer properties:

    + * + * Properties fall into one of several major categories: creation properties, access properties, and + * transfer properties. + * + * Creation properties control permanent object characteristics. These characteristics must be + * established when an object is created, cannot change through the life of the object (they are + * immutable), and the property setting usually has a permanent presence in the file. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Examples of creation properties include:
    + *

    + * Whether a dataset is stored in a compact, contiguous, or chunked layout
    + *
    + * The default for this dataset creation property (\ref H5Pset_layout) is that a dataset is + * stored in a contiguous block. This works well for datasets with a known size limit that + * will fit easily in system memory.
    + *
    + * A chunked layout is important if a dataset is to be compressed, to enable extending + * the dataset’s size, or to enable caching during I/O.
    + *
    + * A compact layout is suitable only for very small datasets because the raw data is + * stored in the object header. + *

    + *
    + *

    + * Creation of intermediate groups when adding an object to an HDF5 file
    + *
    + * This link creation property, \ref H5Pset_create_intermediate_group, enables an + * application to add an object in a file without having to know that the group or group + * hierarchy containing that object already exists. With this property set, HDF5 + * automatically creates missing groups. If this property is not set, an application must + * verify that each group in the path exists, and create those that do not, before creating + * the new object; if any group is missing, the create operation will fail. + *

    + *
    + *

    + * Whether an HDF5 file is a single file or a set of tightly related files that form a virtual + * HDF5 file
    + *
    + * Certain file creation properties enable the application to select one of several file + * layouts. Examples of the available layouts include a standard POSIX-compliant + * layout (\ref H5Pset_fapl_sec2), a family of files (\ref H5Pset_fapl_family), and a split file + * layout that separates raw data and metadata into separate files (\ref H5Pset_fapl_split). + * These and other file layout options are discussed in \ref subsec_file_alternate_drivers. + *

    + *
    + *

    + * To enable error detection when creating a dataset
    + *
    + * In settings where data integrity is vulnerable, it may be desirable to set + * checksumming when datasets are created (\ref H5Pset_fletcher32). A subsequent + * application will then have a means to verify data integrity when reading the dataset. + *

    + *
    + * + * Access properties control transient object characteristics. These characteristics may change with + * the circumstances under which an object is accessed. + * + * + * + * + * + * + * + * + * + *
    Examples of access properties include:
    + *

    + * The driver used to open a file
    + *
    + * For example, a file might be created with the MPI I/O driver (\ref H5Pset_fapl_mpio) + * during high-speed data acquisition in a parallel computing environment. The same + * file might later be analyzed in a serial computing environment with I/O access + * handled through the serial POSIX driver (\ref H5Pset_fapl_sec2). + *

    + *
    + *

    + * Optimization settings in specialized environments
    + *
    + * Optimizations differ across computing environments and according to the needs of + * the task being performed, so are transient by nature. + *

    + *
    + * + * Transfer properties apply only to datasets and control transient aspects of data I/O. These + * characteristics may change with the circumstances under which data is accessed. + * + * + * + * + * + * + * + * + * + *
    Examples of dataset transfer properties include:
    + *

    + * To enable error detection when reading a dataset
    + *
    + * If checksumming has been set on a dataset (with \ref H5Pset_fletcher32, in the dataset + * creation property list), an application reading that dataset can choose whether to check + * for data integrity (\ref H5Pset_edc_check). + *

    + *
    + *

    + * Various properties to optimize chunked data I/O on parallel computing systems
    + *
    + * HDF5 provides several properties for tuning I/O of chunked datasets in a parallel + * computing environment (\ref H5Pset_dxpl_mpio_chunk_opt, \ref H5Pset_dxpl_mpio_chunk_opt_num, + * \ref H5Pset_dxpl_mpio_chunk_opt_ratio, and \ref H5Pget_mpio_actual_chunk_opt_mode).
    + *
    + * Optimal settings differ due to the characteristics of a computing environment and due + * to an application’s data access patterns; even when working with the same file, these + * settings might change for every application and every platform. + *

    + *
    + * + * \subsection subsec_plist_program Programming Model for Properties and Property Lists + * + * The programming model for HDF5 property lists is actually quite simple: + * \li Create a property list. + * \li Modify the property list, if required. + * \li Use the property list. + * \li Close the property list. + * + * There are nuances, of course, but that is the basic process. + * + * In some cases, you will not have to define property lists at all. If the default property settings are + * sufficient for your application, you can tell HDF5 to use the default property list. + * + * The following sections first discuss the use of default property lists, then each step of the + * programming model, and finally a few less frequently used property list operations. + * + * \subsubsection subsubsec_plist_default Using Default Property Lists + * + * Default property lists can simplify many routine HDF5 tasks because you do not always have to + * create every property list you use. + * + * An application that would be well-served by HDF5’s default property settings can use the default + * property lists simply by substituting the value \ref H5P_DEFAULT for a property list identifier. + * HDF5 will then apply the default property list for the appropriate property list class. + * + * For example, the function \ref H5Dcreate2 calls for a link creation property list, a dataset creation + * property list, and a dataset access property list. If the default properties are suitable for a dataset, + * this call can be made as + * \code + * dset_id = H5Dcreate2( loc_id, name, dtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT ); + * \endcode + * HDF5 will then apply the default link creation, dataset creation, and dataset access property lists + * correctly. + * + * Of course, you would not want to do this without considering where it is appropriate, as there + * may be unforeseen consequences. Consider, for example, the use of chunked datasets. 
Optimal + * chunking is quite dependent on the makeup of the dataset and the most common access patterns, + * both of which must be taken into account in setting up the size and shape of chunks. + * + * \subsubsection subsubsec_plist_basic Basic Steps of the Programming Model + * + * The steps of the property list programming model are described in the sub-sections below. + * + *

    Create a Property List

    + * + * A new property list can be created either as an instance of a property list class or by copying an + * existing property list. Consider the following examples. A new dataset creation property list is + * first created "from scratch" with \ref H5Pcreate. A second dataset creation property list is then + * created by copying the first one with \ref H5Pcopy. + * + * \code + * dcplA_id = H5Pcreate (H5P_DATASET_CREATE); + * \endcode + * + * The new dataset creation property list is created as an instance of the property list class + * \ref H5P_DATASET_CREATE. + * + * The new dataset creation property list’s identifier is returned in dcplA_id and the property list is + * initialized with default dataset creation property values. + * + * A list of valid classes appears in the table \ref table_plist "Property list classes in HDF5". + * + * \code + * dcplB_id = H5Pcopy (dcplA_id); + * \endcode + * + * A new dataset creation property list, dcplB_id, is created as a copy of dcplA_id and is initialized + * with dataset creation property values currently in dcplA_id. + * + * At this point, dcplA_id and dcplB_id are identical; they will both contain any modified property + * values that were changed in dcplA_id before dcplB_id was created. They may, however, diverge + * as additional property values are reset in each. + * + * While we are creating property lists, let’s create a link creation property list; we will need this + * property list when the new dataset is linked into the file below: + * \code + * lcplAB_id = H5Pcreate (H5P_LINK_CREATE); + * \endcode + * + *

    Change Property Values

    + * + * This section describes how to set property values. + * + * Later in this section, the dataset creation property lists dcplA_id and dcplB_id created in the + * section above will be used respectively to create chunked and contiguous datasets. To set this up, + * we must set the layout property in each property list. The following example sets dcplA_id for + * chunked datasets and dcplB_id for contiguous datasets: + * \code + * error = H5Pset_layout (dcplA_id, H5D_CHUNKED); + * error = H5Pset_layout (dcplB_id, H5D_CONTIGUOUS); + * \endcode + * + * Since dcplA_id specifies a chunked layout, we must also set the number of dimensions and the + * size of the chunks. The example below specifies that datasets created with dcplA_id will be + * 3-dimensional and that the chunk size will be 100 in each dimension: + * \code + * error = H5Pset_chunk (dcplA_id, 3, [100,100,100]); + * \endcode + * + * These datasets will be created with UTF-8 encoded names. To accomplish that, the following + * example sets the character encoding property in the link creation property list to create link + * names with UTF-8 encoding: + * \code + * error = H5Pset_char_encoding (lcplAB_id, H5T_CSET_UTF8); + * \endcode + * + * dcplA_id can now be used to create chunked datasets and dcplB_id to create contiguous datasets. + * And with the use of lcplAB_id, they will be created with UTF-8 encoded names. + * + *

    Use the Property List

    + * + * Once the required property lists have been created, they can be used to control various HDF5 + * processes. For illustration, consider dataset creation. + * + * Assume that the datatype dtypeAB and the dataspaces dspaceA and dspaceB have been defined + * and that the location identifier locAB_id specifies the group AB in the current HDF5 file. We + * have already created the required link creation and dataset creation property lists. + * For the sake of illustration, we assume that the default dataset access property list meets our application + * requirements. The following calls would create the datasets dsetA and dsetB in the group AB. + * The raw data in dsetA will be chunked while dsetB raw data will be contiguous; both datasets + * will have UTF-8 encoded link names: + * + * \code + * dsetA_id = H5Dcreate2( locAB_id, dsetA, dtypeAB, dspaceA_id, + * lcplAB_id, dcplA_id, H5P_DEFAULT ); + * dsetB_id = H5Dcreate2( locAB_id, dsetB, dtypeAB, dspaceB_id, + * lcplAB_id, dcplB_id, H5P_DEFAULT ); + * \endcode + * + *

    Close the Property List

    + * + * Generally, creating or opening anything in an HDF5 file results in an HDF5 identifier. These + * identifiers are of HDF5 type hid_t and include things like file identifiers, often expressed as + * file_id; dataset identifiers, dset_id; and property list identifiers, plist_id. To reduce the risk of + * memory leaks, all of these identifiers must be closed once they are no longer needed. + * + * Property list identifiers are no exception to this rule, and \ref H5Pclose is used for this purpose. The + * calls immediately following would close the property lists created and used in the examples above. + * + * \code + * error = H5Pclose (dcplA_id); + * error = H5Pclose (dcplB_id); + * error = H5Pclose (lcplAB_id); + * \endcode + * + * \subsubsection subsubsec_plist_additional Additional Property List Operations + * + * A few property list operations fall outside of the programming model described above. This + * section describes those operations. + * + *

    Query the Class of an Existing Property List

    + * + * Occasionally an application will have a property list but not know the corresponding property list + * class. A call such as in the following example will retrieve the unknown class of a known property list: + * \code + * PList_Class = H5Pget_class (dcplA_id); + * \endcode + * + * Upon this function’s return, PList_Class will contain the value \ref H5P_DATASET_CREATE indicating that + * dcplA_id is a dataset creation property list. + + *

    Determine Current Creation Property List Settings in an Existing Object

    + * + * After a file has been created, another application may work on the file without knowing how the + * creation properties for the file were set up. Retrieving these property values is often unnecessary; + * HDF5 can read the data and knows how to deal with any properties it encounters. + * + * But sometimes an application must do something that requires knowing the creation property + * settings. HDF5 makes the acquisition of this information fairly straightforward; for each + * property setting call, H5Pset_*, there is a corresponding H5Pget_* call to retrieve the property’s + * current setting. + * + * Consider the following examples which illustrate the determination of dataset layout and chunking settings: + * + * The application must first identify the creation property list with the appropriate get creation property + * list call. There is one such call for each kind of object. + * + * \ref H5Dget_create_plist will return a property list identifier for the creation property list that was + * used to create the dataset. Call it DCPL1_id. + * + * \ref H5Pset_layout sets a dataset’s layout to be compact, contiguous, or chunked. + * + * \ref H5Pget_layout called with DCPL1_id will return the dataset’s layout, + * either \ref H5D_COMPACT, \ref H5D_CONTIGUOUS, or \ref H5D_CHUNKED. + * + * \ref H5Pset_chunk sets the rank of a dataset, that is the number of dimensions it will have, and the + * maximum size of each dimension. + * + * \ref H5Pget_chunk, also called with DCPL1_id, will return the rank of the dataset and the maximum + * size of each dimension. + * + * If a creation property value has not been explicitly set, these H5Pget_* calls will return the + * property’s default value. + * + *

    Determine Access Property Settings

    + * + * Access property settings are quite different from creation properties. Since access property + * settings are not retained in an HDF5 file or object, there is normally no knowledge of the settings + * that were used in the past. On the other hand, since access properties do not affect characteristics + * of the file or object, this is not normally an issue. For more information, see "Access and + * Creation Property Exceptions." + * + * One circumstance under which an application might need to determine access property settings + * might be when a file or object is already open but the application does not know the property list + * settings. In that case, the application can use the appropriate get access property list + * call to retrieve a property list identifier. For example, if the dataset dsetA + * from the earlier examples is still open, the following call would return an identifier for the dataset + * access property list in use: + * \code + * dsetA_dacpl_id = H5Dget_access_plist( dsetA_id ); + * \endcode + * + * The application could then use the returned property list identifier to analyze the property settings + * + * \subsection subsec_plist_generic Generic Properties Interface and User-defined Properties + * + * HDF5’s generic property interface provides tools for managing the entire property hierarchy and + * for the creation and management of user-defined property lists and properties. This interface also + * makes it possible for an application or a driver to create, modify, and manage custom properties, + * property lists, and property list classes. A comprehensive list of functions for this interface + * appears under "Generic Property Operations (Advanced)" in the "H5P: Property List Interface" + * section of the \ref RM. + * + * Further discussion of HDF5’s generic property interface and user-defined properties and + * property lists is beyond the scope of this document. 
+ * + * \subsection subsec_plist_H5P Property List Function Summaries + * + * General property functions, generic property functions and macros, property functions that are + * used with multiple types of objects, and object and link property functions are listed below. + * + * Property list functions that apply to a specific type of object are listed in the chapter that + * discusses that object. For example, the \ref sec_dataset chapter has two property list function listings: + * one for dataset creation property list functions and one for dataset access property list functions. + * As has been stated, this chapter is not intended to describe every property list function. + * + * \ref H5P reference manual + * + * \subsection subsec_plist_resources Additional Property List Resources + * Property lists are ubiquitous in an HDF5 environment and are therefore discussed in many places + * in HDF5 documentation. The following sections and listings in the \ref UG are of + * particular interest: + * \li In the \ref sec_data_model chapter, see \ref subsubsec_data_model_abstract_plist. + * \li In the \ref sec_file chapter, see the following sections and listings: + *
    • \ref subsec_file_creation_access
    • + *
    • \ref subsec_file_property_lists
    • + *
    • \ref subsubsec_file_examples_props
    • + *
    • \ref subsubsec_file_examples_access
    • + *
    • "File creation property list functions (H5P)"
    • + *
    • "File access property list functions (H5P)"
    • + *
    • "File driver functions (H5P)"
    + * \li In the \ref sec_attribute chapter, see "Attribute creation property list functions (H5P)". + * \li In the \ref sec_group chapter, see "Group creation property list functions (H5P)". + * \li Property lists are discussed throughout \ref sec_dataset. + * + * All property list functions are described in the \ref H5P section of the + * \ref RM. The function index at the top of the page provides a categorized listing + * grouped by property list class. Those classes are listed below: + * \li File creation properties + * \li File access properties + * \li Group creation properties + * \li Dataset creation properties + * \li Dataset access properties + * \li Dataset transfer properties + * \li Link creation properties + * \li Link access properties + * \li Object creation properties + * \li Object copy properties + * + * Additional categories not related to the class structure are as follows: + * \li General property list operations + * \li Generic property list functions + * + * The general property functions can be used with any property list; the generic property functions + * constitute an advanced feature. + * + * The in-memory file image feature of HDF5 uses property lists in a manner that differs + * substantially from their use elsewhere in HDF5. Those who plan to use in-memory file images + * must study "File Image Operations" (PDF) in the Advanced Topics in HDF5 collection. + * + * \subsection subsec_plist_notes Notes + * + * \anchor FileMountProps

    File Mount Properties

    + * + * While the file mount property list class \ref H5P_FILE_MOUNT is a valid HDF5 property list class, + * no file mount properties are defined by the HDF5 Library. References to a file mount property + * list should always be expressed as \ref H5P_DEFAULT, meaning the default file mount property list. + * + *

    Access and Creation Property Exceptions

    + * + * There are a small number of exceptions to the rule that creation properties are always retained in + * a file or object and access properties are never retained. + * + * The following properties are file access properties but they are not transient; they have + * permanent and different effects on a file. They could be validly classified as file creation + * properties as they must be set at creation time to properly create the file. But they are access + * properties because they must also be set when a file is reopened to properly access the file. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    PropertyRelated function
    + * Family file driver + * + * \ref H5Pset_fapl_family + *
    + * Split file driver + * + * \ref H5Pset_fapl_split + *
    + * Core file driver + * + * \ref H5Pset_fapl_core + *
    + * + * The following is a link creation property, but it is not relevant after an object has been created + * and is not retained in the file or object. + * + * + * + * + * + * + *
    PropertyRelated function
    + * Create missing intermediate groups + * + * \ref H5Pset_create_intermediate_group + *
    + * + * Previous Chapter \ref sec_error - Next Chapter \ref sec_vol + * + * \defgroup H5P Property Lists (H5P) * * Use the functions in this module to manage HDF5 property lists and property * list classes. HDF5 property lists are the main vehicle to configure the @@ -60,135 +913,118 @@ * * * - * \defgroup ALCAPL Attribute and Link Creation Properties - * \ingroup H5P + * \defgroup STRCPL String Creation Properties * Currently, there are only two creation properties that you can use to control * the creation of HDF5 attributes and links. The first creation property, the * choice of a character encoding, applies to both attributes and links. * The second creation property applies to links only, and advises the library * to automatically create missing intermediate groups when creating new objects. + * \ingroup H5P * - * \defgroup DAPL Dataset Access Properties + * \defgroup LCPL Link Creation Properties + * The first creation property, the choice of a character encoding, applies to + * both attributes and links. + * The second creation property applies to links only, and advises the library + * to automatically create missing intermediate groups when creating new objects. + * \ingroup STRCPL + * + * @see STRCPL + * + * \defgroup ACPL Attribute Creation Properties + * The creation property, the choice of a character encoding, applies to attributes. + * \ingroup STRCPL + * + * @see STRCPL + * + * \defgroup LAPL Link Access Properties * \ingroup H5P + * + * \defgroup DAPL Dataset Access Properties * Use dataset access properties to modify the default behavior of the HDF5 * library when accessing datasets. The properties include adjusting the size * of the chunk cache, providing prefixes for external content and virtual * dataset file paths, and controlling flush behavior, etc. These properties * are \Emph{not} persisted with datasets, and can be adjusted at runtime before * a dataset is created or opened. 
+ * \ingroup LAPL * * \defgroup DCPL Dataset Creation Properties - * \ingroup H5P * Use dataset creation properties to control aspects of dataset creation such * as fill time, storage layout, compression methods, etc. * Unlike dataset access and transfer properties, creation properties \Emph{are} * stored with the dataset, and cannot be changed once a dataset has been * created. + * \ingroup OCPL * * \defgroup DXPL Dataset Transfer Properties - * \ingroup H5P * Use dataset transfer properties to customize certain aspects of reading * and writing datasets such as transformations, MPI-IO I/O mode, error * detection, etc. These properties are \Emph{not} persisted with datasets, * and can be adjusted at runtime before a dataset is read or written. + * \ingroup H5P * * \defgroup FAPL File Access Properties - * \ingroup H5P * Use file access properties to modify the default behavior of the HDF5 * library when accessing files. The properties include selecting a virtual * file driver (VFD), configuring the metadata cache (MDC), control * file locking, etc. These properties are \Emph{not} persisted with files, and * can be adjusted at runtime before a file is created or opened. + * \ingroup H5P * * \defgroup FCPL File Creation Properties - * \ingroup H5P * Use file creation properties to control aspects of file creation such * as setting a file space management strategy or creating a user block. * Unlike file access properties, creation properties \Emph{are} * stored with the file, and cannot be changed once a file has been * created. + * \ingroup GCPL * * \defgroup GAPL General Access Properties - * \ingroup H5P * The functions in this section can be applied to different kinds of property * lists. + * \ingroup LAPL * * \defgroup GCPL Group Creation Properties - * \ingroup H5P * Use group creation properties to control aspects of group creation such * as storage layout, compression, and link creation order tracking. 
* Unlike file access properties, creation properties \Emph{are} * stored with the group, and cannot be changed once a group has been * created. + * \ingroup OCPL * - * \defgroup GPLO General Property List Operations - * \ingroup H5P - * + * \defgroup PLCR Property List Class Root * Use the functions in this module to manage HDF5 property lists. - * - * - * - * - * - * - * - * - * - * - * - *
    CreateRead
    - * \snippet{lineno} H5P_examples.c create - * - * \snippet{lineno} H5P_examples.c read - *
    UpdateDelete
    - * \snippet{lineno} H5P_examples.c update - * - * \snippet{lineno} H5P_examples.c delete - *
    - * - * \defgroup GPLOA General Property List Operations (Advanced) * \ingroup H5P * + * \defgroup PLCRA Property List Class Root (Advanced) * You can create and customize user-defined property list classes using the * functions described below. Arbitrary user-defined properties can also * be inserted into existing property lists as so-called temporary properties. - * - * - * - * - * - * - * - * - * - * - * - * - *
    CreateRead
    - * \snippet{lineno} H5P_examples.c create_class - * - * \snippet{lineno} H5P_examples.c read_class - *
    UpdateDelete
    - * \snippet{lineno} H5P_examples.c update_class - * - * \snippet{lineno} H5P_examples.c delete_class - *
    - * - * \defgroup LAPL Link Access Properties * \ingroup H5P * * - * \defgroup MAPL Map Access Properties - * \ingroup H5P - * \defgroup OCPL Object Creation Properties * \ingroup H5P * + * \defgroup OCPYPL Object Copy Properties + * \ingroup H5P * - * \defgroup OCPPL Object Copy Properties + * \defgroup FMPL File Mount Properties + * Empty property class. * \ingroup H5P * * + * \defgroup TCPL Datatype Creation Properties + * TCPL isn't supported yet. + * \ingroup OCPL + * + * + * \defgroup TAPL Datatype Access Properties + * TAPL isn't supported yet. + * \ingroup LAPL + * + * + * */ #endif /* H5Pmodule_H */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 8c021f2..5bf2b21 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -392,7 +392,7 @@ H5_DLLVAR hid_t H5P_CLS_LINK_ACCESS_ID_g; H5_DLLVAR hid_t H5P_CLS_VOL_INITIALIZE_ID_g; H5_DLLVAR hid_t H5P_CLS_REFERENCE_ACCESS_ID_g; -/* Default roperty list IDs */ +/* Default property list IDs */ /* (Internal to library, do not use! Use macros above) */ H5_DLLVAR hid_t H5P_LST_FILE_CREATE_ID_g; H5_DLLVAR hid_t H5P_LST_FILE_ACCESS_ID_g; @@ -421,7 +421,7 @@ H5_DLLVAR hid_t H5P_LST_REFERENCE_ACCESS_ID_g; /* Generic property list routines */ /** - * \ingroup GPLO + * \ingroup PLCR * * \brief Terminates access to a property list * @@ -439,7 +439,7 @@ H5_DLLVAR hid_t H5P_LST_REFERENCE_ACCESS_ID_g; */ H5_DLL herr_t H5Pclose(hid_t plist_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Closes an existing property list class * @@ -456,7 +456,7 @@ H5_DLL herr_t H5Pclose(hid_t plist_id); */ H5_DLL herr_t H5Pclose_class(hid_t plist_id); /** - * \ingroup GPLO + * \ingroup PLCR * * \brief Copies an existing property list to create a new property list * @@ -473,7 +473,7 @@ H5_DLL herr_t H5Pclose_class(hid_t plist_id); */ H5_DLL hid_t H5Pcopy(hid_t plist_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Copies a property from one list or class to another * @@ -509,7 +509,7 @@ H5_DLL hid_t H5Pcopy(hid_t plist_id); */ 
H5_DLL herr_t H5Pcopy_prop(hid_t dst_id, hid_t src_id, const char *name); /** - * \ingroup GPLO + * \ingroup PLCR * * \brief Creates a new property list as an instance of a property list class * @@ -633,7 +633,7 @@ H5_DLL herr_t H5Pcopy_prop(hid_t dst_id, hid_t src_id, const char *name); */ H5_DLL hid_t H5Pcreate(hid_t cls_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Creates a new property list class * @@ -676,7 +676,7 @@ H5_DLL hid_t H5Pcreate_class(hid_t parent, const char *name, H5P_cls_create_func H5P_cls_copy_func_t copy, void *copy_data, H5P_cls_close_func_t close, void *close_data); /** - * \ingroup GPLO + * \ingroup PLCR * * \brief Decodes property list received in a binary object buffer and * returns a new property list identifier @@ -705,7 +705,7 @@ H5_DLL hid_t H5Pcreate_class(hid_t parent, const char *name, H5P_cls_create_func */ H5_DLL hid_t H5Pdecode(const void *buf); /** - * \ingroup GPLO + * \ingroup PLCR * * \brief Encodes the property values in a property list into a binary * buffer @@ -759,7 +759,7 @@ H5_DLL hid_t H5Pdecode(const void *buf); */ H5_DLL herr_t H5Pencode2(hid_t plist_id, void *buf, size_t *nalloc, hid_t fapl_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Compares two property lists or classes for equality * @@ -779,7 +779,7 @@ H5_DLL herr_t H5Pencode2(hid_t plist_id, void *buf, size_t *nalloc, hid_t fapl_i */ H5_DLL htri_t H5Pequal(hid_t id1, hid_t id2); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Queries whether a property name exists in a property list or * class @@ -797,7 +797,7 @@ H5_DLL htri_t H5Pequal(hid_t id1, hid_t id2); */ H5_DLL htri_t H5Pexist(hid_t plist_id, const char *name); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Queries the value of a property * @@ -829,7 +829,7 @@ H5_DLL htri_t H5Pexist(hid_t plist_id, const char *name); */ H5_DLL herr_t H5Pget(hid_t plist_id, const char *name, void *value); /** - *\ingroup GPLO + * \ingroup PLCR * * \brief Returns the property list 
class identifier for a property list * @@ -892,7 +892,7 @@ H5_DLL herr_t H5Pget(hid_t plist_id, const char *name, void *value); */ H5_DLL hid_t H5Pget_class(hid_t plist_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Retrieves the name of a class * @@ -1036,7 +1036,7 @@ H5_DLL hid_t H5Pget_class(hid_t plist_id); */ H5_DLL char *H5Pget_class_name(hid_t pclass_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Retrieves the parent class of a property class * @@ -1052,7 +1052,7 @@ H5_DLL char *H5Pget_class_name(hid_t pclass_id); */ H5_DLL hid_t H5Pget_class_parent(hid_t pclass_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Queries the number of properties in a property list or class * @@ -1075,7 +1075,7 @@ H5_DLL hid_t H5Pget_class_parent(hid_t pclass_id); */ H5_DLL herr_t H5Pget_nprops(hid_t id, size_t *nprops); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Queries the size of a property value in bytes * @@ -1096,7 +1096,7 @@ H5_DLL herr_t H5Pget_nprops(hid_t id, size_t *nprops); */ H5_DLL herr_t H5Pget_size(hid_t id, const char *name, size_t *size); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Registers a temporary property with a property list * @@ -1346,7 +1346,7 @@ H5_DLL herr_t H5Pinsert2(hid_t plist_id, const char *name, size_t size, void *va H5P_prp_get_func_t get, H5P_prp_delete_func_t prp_del, H5P_prp_copy_func_t copy, H5P_prp_compare_func_t compare, H5P_prp_close_func_t close); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Determines whether a property list is a member of a class * @@ -1366,7 +1366,7 @@ H5_DLL herr_t H5Pinsert2(hid_t plist_id, const char *name, size_t size, void *va */ H5_DLL htri_t H5Pisa_class(hid_t plist_id, hid_t pclass_id); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Iterates over properties in a property class or list * @@ -1412,7 +1412,7 @@ H5_DLL htri_t H5Pisa_class(hid_t plist_id, hid_t pclass_id); */ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void 
*iter_data); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Registers a permanent property with a property list class * @@ -1693,7 +1693,7 @@ H5_DLL herr_t H5Pregister2(hid_t cls_id, const char *name, size_t size, void *de H5P_prp_delete_func_t prp_del, H5P_prp_copy_func_t copy, H5P_prp_compare_func_t compare, H5P_prp_close_func_t close); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Removes a property from a property list * @@ -1719,7 +1719,7 @@ H5_DLL herr_t H5Pregister2(hid_t cls_id, const char *name, size_t size, void *de */ H5_DLL herr_t H5Premove(hid_t plist_id, const char *name); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Sets a property list value * @@ -1751,7 +1751,7 @@ H5_DLL herr_t H5Premove(hid_t plist_id, const char *name); */ H5_DLL herr_t H5Pset(hid_t plist_id, const char *name, const void *value); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Removes a property from a property list class * @@ -1770,8 +1770,6 @@ H5_DLL herr_t H5Pset(hid_t plist_id, const char *name, const void *value); */ H5_DLL herr_t H5Punregister(hid_t pclass_id, const char *name); -/* Object creation property list (OCPL) routines */ - /** * \ingroup DCPL * @@ -1791,6 +1789,9 @@ H5_DLL herr_t H5Punregister(hid_t pclass_id, const char *name); * */ H5_DLL htri_t H5Pall_filters_avail(hid_t plist_id); + +/* Object creation property list (OCPL) routines */ + /** * \ingroup OCPL * @@ -8203,7 +8204,7 @@ H5_DLL herr_t H5Pset_dataset_io_hyperslab_selection(hid_t plist_id, unsigned ran /* Link creation property list (LCPL) routines */ /** - * \ingroup ALCAPL + * \ingroup STRCPL * * \brief Determines whether property is set to enable creating missing * intermediate groups @@ -8234,7 +8235,7 @@ H5_DLL herr_t H5Pset_dataset_io_hyperslab_selection(hid_t plist_id, unsigned ran */ H5_DLL herr_t H5Pget_create_intermediate_group(hid_t plist_id, unsigned *crt_intmd /*out*/); /** - * \ingroup ALCAPL + * \ingroup STRCPL * * \brief Specifies in property list whether to 
create missing * intermediate groups @@ -8618,7 +8619,7 @@ H5_DLL herr_t H5Pget_map_iterate_hints(hid_t mapl_id, size_t *key_prefetch_size /* String creation property list (STRCPL) routines */ /** - * \ingroup ALCAPL + * \ingroup STRCPL * * \brief Retrieves the character encoding used to create a link or * attribute name @@ -8647,7 +8648,7 @@ H5_DLL herr_t H5Pget_map_iterate_hints(hid_t mapl_id, size_t *key_prefetch_size */ H5_DLL herr_t H5Pget_char_encoding(hid_t plist_id, H5T_cset_t *encoding /*out*/); /** - * \ingroup ALCAPL + * \ingroup STRCPL * * \brief Sets the character encoding used to encode link and attribute * names @@ -8688,7 +8689,6 @@ H5_DLL herr_t H5Pget_char_encoding(hid_t plist_id, H5T_cset_t *encoding /*out*/) */ H5_DLL herr_t H5Pset_char_encoding(hid_t plist_id, H5T_cset_t encoding); -/* Link access property list (LAPL) routines */ /** * \ingroup LAPL * @@ -9047,7 +9047,7 @@ H5_DLL herr_t H5Pset_nlinks(hid_t plist_id, size_t nlinks); /* Object copy property list (OCPYPL) routines */ /** - * \ingroup OCPPL + * \ingroup OCPYPL * * \brief Adds a path to the list of paths that will be searched in the * destination file for a matching committed datatype @@ -9162,7 +9162,7 @@ H5_DLL herr_t H5Pset_nlinks(hid_t plist_id, size_t nlinks); */ H5_DLL herr_t H5Padd_merge_committed_dtype_path(hid_t plist_id, const char *path); /** - * \ingroup OCPPL + * \ingroup OCPYPL * * \brief Clears the list of paths stored in the object copy property list * @@ -9213,7 +9213,7 @@ H5_DLL herr_t H5Padd_merge_committed_dtype_path(hid_t plist_id, const char *path */ H5_DLL herr_t H5Pfree_merge_committed_dtype_paths(hid_t plist_id); /** - * \ingroup OCPPL + * \ingroup OCPYPL * * \brief Retrieves the properties to be used when an object is copied * @@ -9238,7 +9238,7 @@ H5_DLL herr_t H5Pfree_merge_committed_dtype_paths(hid_t plist_id); */ H5_DLL herr_t H5Pget_copy_object(hid_t plist_id, unsigned *copy_options /*out*/); /** - * \ingroup OCPPL + * \ingroup OCPYPL * * \brief 
Retrieves the callback function from the specified object copy * property list @@ -9276,7 +9276,7 @@ H5_DLL herr_t H5Pget_copy_object(hid_t plist_id, unsigned *copy_options /*out*/) */ H5_DLL herr_t H5Pget_mcdt_search_cb(hid_t plist_id, H5O_mcdt_search_cb_t *func, void **op_data); /** - * \ingroup OCPPL + * \ingroup OCPYPL * * \brief Sets properties to be used when an object is copied * @@ -9369,7 +9369,7 @@ H5_DLL herr_t H5Pget_mcdt_search_cb(hid_t plist_id, H5O_mcdt_search_cb_t *func, */ H5_DLL herr_t H5Pset_copy_object(hid_t plist_id, unsigned copy_options); /** - * \ingroup OCPPL + * \ingroup OCPYPL * * \brief Sets the callback function that H5Ocopy() will invoke before * searching the entire destination file for a matching committed @@ -9467,7 +9467,7 @@ H5_DLL herr_t H5Pset_mcdt_search_cb(hid_t plist_id, H5O_mcdt_search_cb_t func, v /* Typedefs */ /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Registers a permanent property with a property list class * @@ -9597,7 +9597,7 @@ H5_DLL herr_t H5Pregister1(hid_t cls_id, const char *name, size_t size, void *de H5P_prp_get_func_t prp_get, H5P_prp_delete_func_t prp_del, H5P_prp_copy_func_t prp_copy, H5P_prp_close_func_t prp_close); /** - * \ingroup GPLOA + * \ingroup PLCRA * * \brief Registers a temporary property with a property list * @@ -9709,7 +9709,7 @@ H5_DLL herr_t H5Pinsert1(hid_t plist_id, const char *name, size_t size, void *va H5P_prp_delete_func_t prp_delete, H5P_prp_copy_func_t prp_copy, H5P_prp_close_func_t prp_close); /** - * \ingroup GPLO + * \ingroup PLCRA * * \brief Encodes the property values in a property list into a binary * buffer diff --git a/src/H5Rmodule.h b/src/H5Rmodule.h index d9ab968..5e3affb 100644 --- a/src/H5Rmodule.h +++ b/src/H5Rmodule.h @@ -24,34 +24,17 @@ #define H5_MY_PKG H5R #define H5_MY_PKG_ERR H5E_REFERENCE +/** \page H5R_UG The HDF5 References + * @todo Under Construction + */ + /** - * \defgroup H5R H5R + * \defgroup H5R References (H5R) * * Use the functions in this 
module to manage HDF5 references. Referents can * be HDF5 objects, attributes, and selections on datasets a.k.a. dataset * regions. * - * - * - * - * - * - * - * - * - * - * - * - *
    CreateRead
    - * \snippet{lineno} H5R_examples.c create - * - * \snippet{lineno} H5R_examples.c read - *
    UpdateDelete
    - * \snippet{lineno} H5R_examples.c update - * - * \snippet{lineno} H5R_examples.c delete - *
    - * */ #endif /* H5Rmodule_H */ diff --git a/src/H5Smodule.h b/src/H5Smodule.h index 72d722a..73f5953 100644 --- a/src/H5Smodule.h +++ b/src/H5Smodule.h @@ -28,7 +28,1494 @@ #define H5_MY_PKG H5S #define H5_MY_PKG_ERR H5E_DATASPACE -/**\defgroup H5S H5S +/** \page H5S_UG Dataspaces and Partial I/O + * + * + * \section sec_dataspace HDF5 Dataspaces and Partial I/O + * + * HDF5 dataspaces describe the \Emph{shape} of datasets in memory or in HDF5 + * files. Dataspaces can be empty (#H5S_NULL), a singleton (#H5S_SCALAR), or + * a multi-dimensional, regular grid (#H5S_SIMPLE). Dataspaces can be re-shaped. + * + * Subsets of dataspaces can be "book-marked" or used to restrict I/O operations + * using \Emph{selections}. Furthermore, certain set operations are supported + * for selections. + * + * \subsection subsec_dataspace_intro Introduction + * + * The HDF5 \Emph{dataspace} is a required component of an HDF5 dataset or attribute definition. The dataspace + * defines the size and shape of the dataset or attribute raw data. In other words, a dataspace defines the + * number of dimensions and the size of each dimension of the multidimensional array in which the raw data + * is represented. The dataspace must be defined when the dataset or attribute is created. + * + * The \Emph{dataspace} is also used during dataset I/O operations, defining the elements of the dataset that + * participate in the I/O operation. + * + * This chapter explains the \Emph{dataspace} object and its use in dataset and attribute creation and data + * transfer. It also describes selection operations on a dataspace used to implement sub‐setting, + * sub‐sampling, and scatter‐gather access to datasets. + * + * \subsection subsec_dataspace_function Dataspace Function Summaries + * @see H5S reference manual provides a reference list of dataspace functions, the H5S APIs. 
+ * + * \subsection subsec_dataspace_program Definition of Dataspace Objects and the Dataspace Programming Model + * + * This section introduces the notion of the HDF5 dataspace object and a programming model for creating + * and working with dataspaces. + * + * \subsubsection subsubsec_dataspace_program_object Dataspace Objects + * + * An HDF5 dataspace is a required component of an HDF5 dataset or attribute. A dataspace defines the size + * and the shape of a dataset’s or an attribute’s raw data. Currently, HDF5 supports the following types of + * the dataspaces: + * \li Scalar dataspaces + * \li Simple dataspaces + * \li Null dataspaces + * + * A scalar dataspace, #H5S_SCALAR, represents just one element, a scalar. Note that the datatype of this one + * element may be very complex; example would be a compound structure with members being of any + * allowed HDF5 datatype, including multidimensional arrays, strings, and nested compound structures. By + * convention, the rank of a scalar dataspace is always 0 (zero); think of it geometrically as a single, + * dimensionless point, though that point may be complex. + * + * A simple dataspace, #H5S_SIMPLE , is a multidimensional array of elements. The dimensionality of the + * dataspace (or the rank of the array) is fixed and is defined at creation time. The size of each dimension + * can grow during the life time of the dataspace from the current size up to the maximum size. Both the + * current size and the maximum size are specified at creation time. The sizes of dimensions at any particular + * time in the life of a dataspace are called the current dimensions, or the dataspace extent. They can be + * queried along with the maximum sizes. + * + * A null dataspace, #H5S_NULL, contains no data elements. Note that no selections can be applied to a null + * dataset as there is nothing to select. 
+ * + * As shown in the UML diagram in the figure below, an HDF5 simple dataspace object has three attributes: + * the rank or number of dimensions; the current sizes, expressed as an array of length rank with each element + * of the array denoting the current size of the corresponding dimension; and the maximum sizes, + * expressed as an array of length rank with each element of the array denoting the maximum size of the + * corresponding dimension. + * + * + * + * + * + *
    + * \image html Dspace_simple.gif "A simple dataspace" + *
    + * + * \em Note: A simple dataspace is defined by its rank, the current size of each dimension, and the maximum + * size of each dimension. + * + * The size of a current dimension cannot be greater than the maximum size, which can be unlimited, specified + * as #H5S_UNLIMITED. Note that while the HDF5 file format and library impose no maximum size on an + * unlimited dimension, practically speaking its size will always be limited to the biggest integer available + * on the particular system being used. + * + * Dataspace rank is restricted to 32, the standard limit in C on the rank of an array, in the current + * implementation of the HDF5 Library. The HDF5 file format, on the other hand, allows any rank up to the + * maximum integer value on the system, so the library restriction can be raised in the future if higher + * dimensionality is required. + * + * Note that most of the time Fortran applications calling HDF5 will work with dataspaces of rank less than + * or equal to seven, since seven is the maximum number of dimensions in a Fortran array. But dataspace rank + * is not limited to seven for Fortran applications. + * + * The current dimensions of a dataspace, also referred to as the dataspace extent, define the bounding box + * for dataset elements that can participate in I/O operations. + * + * \subsubsection subsubsec_dataspace_program_model Dataspace Programming Model + * + * The programming model for creating and working with HDF5 dataspaces can be summarized as follows: + * \li 1. Create a dataspace + * \li 2. Use the dataspace to create a dataset in the file or to describe a data array in memory + * \li 3. Modify the dataspace to define dataset elements that will participate in I/O operations + * \li 4. Use the modified dataspace while reading/writing dataset raw data or to create a region reference + * \li 5. 
Close the dataspace when no longer needed + * + * The rest of this section will address steps 1, 2, and 5 of the programming model; steps 3 and 4 will be + * discussed in later sections of this chapter. + * + *

    Creating a Dataspace

    + * + * A dataspace can be created by calling the \ref H5Screate function. Since the + * definition of a simple dataspace requires the specification of dimensionality (or rank) and initial and + * maximum dimension sizes, the HDF5 Library provides a convenience API, \ref H5Screate_simple to create a + * simple dataspace in one step. + * + * The following examples illustrate the usage of these APIs. + * + *

    Creating a Scalar Dataspace

    + * + * Creating a Scalar Dataspace + * \code + * hid_t space_id; + * . . . + * space_id = H5Screate(H5S_SCALAR); + * \endcode + * As mentioned above, the dataspace will contain only one element. Scalar dataspaces are used more often + * for describing attributes that have just one value. For example, the attribute temperature with the value + * Celsius is used to indicate that the dataset with this attribute stores temperature values using the + * Celsius scale. + * + *

    Creating a Null Dataspace

    + * + * A null dataspace is created with the \ref H5Screate function. + * \code + * hid_t space_id; + * . . . + * space_id = H5Screate(H5S_NULL); + * \endcode + * As mentioned above, the dataspace will contain no elements. + * + *

    Creating a Simple Dataspace

    + * + * Let’s assume that an application wants to store a two‐dimensional array of data, A(20,100). During the + * life of the application, the first dimension of the array can grow up to 30; there is no restriction on + * the size of the second dimension. The following steps are used to declare a dataspace for the dataset + * in which the array data will be stored. + * \code + * hid_t space_id; + * int rank = 2; + * hsize_t current_dims[2] = {20, 100}; + * hsize_t max_dims[2] = {30, H5S_UNLIMITED}; + * . . . + * space_id = H5Screate(H5S_SIMPLE); + * H5Sset_extent_simple(space_id, rank, current_dims, max_dims); + * \endcode + * + * Alternatively, the convenience APIs H5Screate_simple/h5screate_simple_f can replace the + * H5Screate/h5screate_f and H5Sset_extent_simple/h5sset_extent_simple_f calls. + * \code + * space_id = H5Screate_simple(rank, current_dims, max_dims); + * \endcode + * + * In this example, a dataspace with current dimensions of 20 by 100 is created. The first dimension can be + * extended only up to 30. The second dimension, however, is declared unlimited; it can be extended up to + * the largest available integer value on the system. + * + * Note that when there is a difference between the current dimensions and the maximum dimensions of an + * array, then chunking storage must be used. In other words, if the number of dimensions may change over + * the life of the dataset, then chunking must be used. If the array dimensions are fixed (if the number of + * current dimensions is equal to the maximum number of dimensions when the dataset is created), then + * contiguous storage can be used. For more information, see "Data Transfer". + * + * Maximum dimensions can be the same as current dimensions. In such a case, the sizes of dimensions + * cannot be changed during the life of the dataspace object. 
In C, \c NULL can be used to indicate to the + * \ref H5Screate_simple and \ref H5Sset_extent_simple functions that the maximum sizes of all dimensions + * are the same as the current sizes. + * \code + * space_id = H5Screate_simple(rank, current_dims, NULL); + * \endcode + * The created dataspace will have current and maximum dimensions of 20 and 100 correspondingly, and the + * sizes of those dimensions cannot be changed. + * + *

    C versus Fortran Dataspaces

    + * + * Dataspace dimensions are numbered from 1 to rank. HDF5 uses C storage conventions, assuming that the + * last listed dimension is the fastest‐changing dimension and the first‐listed dimension is the slowest + * changing. The HDF5 file format storage layout specification adheres to the C convention and the HDF5 + * Library adheres to the same convention when storing dataspace dimensions in the file. This affects how + * C programs and tools interpret data written from Fortran programs and vice versa. The example below + * illustrates the issue. + * + * When a Fortran application describes a dataspace to store an array as A(20,100), it specifies the value of + * the first dimension to be 20 and the second to be 100. Since Fortran stores data by columns, the + * first‐listed dimension with the value 20 is the fastest‐changing dimension and the last‐listed dimension + * with the value 100 is the slowest‐changing. In order to adhere to the HDF5 storage convention, the HDF5 + * Fortran wrapper transposes dimensions, so the first dimension becomes the last. The dataspace dimensions + * stored in the file will be 100,20 instead of 20,100 in order to correctly describe the Fortran data that + * is stored in 100 columns, each containing 20 elements. + * + * When a Fortran application reads the data back, the HDF5 Fortran wrapper transposes the dimensions + * once more, returning the first dimension to be 20 and the second to be 100, describing correctly the sizes + * of the array that should be used to read data in the Fortran array A(20,100). + * + * When a C application reads data back, the dimensions will come out as 100 and 20, correctly describing + * the size of the array to read data into, since the data was written as 100 records of 20 elements each. + * Therefore C tools such as h5dump and h5ls always display transposed dimensions and values for the data + * written by a Fortran application. 
+ * + * Consider the following simple example of equivalent C 3 x 5 and Fortran 5 x 3 arrays. As illustrated in + * the figure below, a C application will store a 3 x 5 2‐dimensional array as three 5‐element rows. In order + * to store the same data in the same order, a Fortran application must view the array as a 5 x 3 array with + * three 5‐element columns. The dataspace of this dataset, as written from Fortran, will therefore be + * described as 5 x 3 in the application but stored and described in the file according to the C convention + * as a 3 x 5 array. This ensures that C and Fortran applications will always read the data in the order in + * which it was written. The HDF5 Fortran interface handles this transposition automatically. + * \code + * // C + * \#define NX 3 // dataset dimensions + * \#define NY 5 + * . . . + * int data[NX][NY]; // data to write + * . . . + * // Data and output buffer initialization. + * for (j = 0; j < NX; j++) + * for (i = 0; i < NY; i++) + * data[j][i] = i + j; + * // + * // 1 2 3 4 5 + * // 6 7 8 9 10 + * // 11 12 13 14 15 + * // + * . . . + * dims[0] = NX; + * dims[1] = NY; + * dataspace = H5Screate_simple(RANK, dims, NULL); + * \endcode + * + * \code + * ! Fortran + * INTEGER, PARAMETER :: NX = 3 + * INTEGER, PARAMETER :: NY = 5 + * . . . + * INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/NY, NX/) ! Dataset dimensions + * . . . + * ! + * ! Initialize data + * ! + * do i = 1, NY + * do j = 1, NX + * data(i,j) = i + (j-1)*NY + * enddo + * enddo + * ! + * ! Data + * ! + * ! 1 6 11 + * ! 2 7 12 + * ! 3 8 13 + * ! 4 9 14 + * ! 5 10 15 + * . . . + * CALL h5screate_simple_f(rank, dims, dspace_id, error) + * \endcode + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Comparing C and Fortran dataspaces
    + * A dataset stored by a C program in a 3 x 5 array: + *
    + * \image html Dspace_CvsF1.gif + *
    + * The same dataset stored by a Fortran program in a 5 x 3 array: + *
    + * \image html Dspace_CvsF2.gif + *
    + * The first dataset above as written to an HDF5 file from C or the second dataset above as written + * from Fortran: + *
    + * \image html Dspace_CvsF3.gif + *
    + * The first dataset above as written to an HDF5 file from Fortran: + *
    + * \image html Dspace_CvsF4.gif + *
    + * + * Note: The HDF5 Library stores arrays along the fastest‐changing dimension. This approach is often + * referred to as being “in C order.” C, C++, and Java work with arrays in row‐major order. In other words, + * the row, or the last dimension, is the fastest‐changing dimension. Fortran, on the other hand, handles + * arrays in column‐major order making the column, or the first dimension, the fastest‐changing dimension. + * Therefore, the C and Fortran arrays illustrated in the top portion of this figure are stored identically + * in an HDF5 file. This ensures that data written by any language can be meaningfully read, interpreted, + * and manipulated by any other. + * + *

    Finding Dataspace Characteristics

    + * + * The HDF5 Library provides several APIs designed to query the characteristics of a dataspace. + * + * The function \ref H5Sis_simple returns information about the type of a dataspace. + * This function is rarely used and currently supports only simple and scalar dataspaces. + * + * To find out the dimensionality, or rank, of a dataspace, use \ref H5Sget_simple_extent_ndims. + * \ref H5Sget_simple_extent_dims can also be used to find out the rank. See + * the example below. If both functions return 0 for the value of rank, then the dataspace is scalar. + * + * To query the sizes of the current and maximum dimensions, use \ref H5Sget_simple_extent_dims. + * + * The following example illustrates querying the rank and dimensions of a dataspace using these functions. + * \code + * hid_t space_id; + * int rank; + * hsize_t *current_dims; + * hsize_t *max_dims; + * . . . + * rank = H5Sget_simple_extent_ndims(space_id); + * // (or rank = H5Sget_simple_extent_dims(space_id, NULL, NULL);) + * current_dims = (hsize_t *)malloc(rank * sizeof(hsize_t)); + * max_dims = (hsize_t *)malloc(rank * sizeof(hsize_t)); + * H5Sget_simple_extent_dims(space_id, current_dims, max_dims); + * // Print values here + * \endcode + * + * \subsection subsec_dataspace_transfer Dataspaces and Data Transfer + * + * Read and write operations transfer data between an HDF5 file on disk and in memory. The shape that the + * array data takes in the file and in memory may be the same, but HDF5 also allows users the ability to + * represent data in memory in a different shape than in the file. If the shape of an array in the file and + * in memory will be the same, then the same dataspace definition can be used for both. If the shape of an + * array in memory needs to be different than the shape in the file, then the dataspace definition for the + * shape of the array in memory can be changed. 
During a read operation, the array will be read into the + * different shape in memory, and during a write operation, the array will be written to the file in the + * shape specified by the dataspace in the file. The only qualification is that the number of elements read + * or written must be the same in both the source and the destination dataspaces. + * + * Item a in the figure below shows a simple example of a read operation in which the data is stored as a 3 + * by 4 array in the file (item b) on disk, but the program wants it to be a 4 by 3 array in memory. This is + * accomplished by setting the memory dataspace to describe the desired memory layout, as in item c. The read + * operation reads the data in the file array into the memory array. + * + * + * + * + * + *
    + * \image html Dspace_read.gif "Data layout before and after a read operation" + *
    + * + * + * + * + * + *
    + * \image html Dspace_move.gif "Moving data from disk to memory" + *
    + + * Both the source and destination are stored as contiguous blocks of storage with the elements in the order + * specified by the dataspace. The figure above shows one way the elements might be organized. In item a, + * the elements are stored as 3 blocks of 4 elements. The destination is an array of 12 elements in memory + * (see item c). As the figure suggests, the transfer reads the disk blocks into a memory buffer (see item b), + * and then writes the elements to the correct locations in memory. A similar process occurs in reverse when + * data is written to disk. + * + * \subsubsection subsubsec_dataspace_transfer_select Data Selection + * + * In addition to rearranging data, the transfer may select the data elements from the source and destination. + * + * Data selection is implemented by creating a dataspace object that describes the selected elements (within + * the hyper rectangle) rather than the whole array. Two dataspace objects with selections can be used in + * data transfers to read selected elements from the source and write selected elements to the destination. + * When data is transferred using the dataspace object, only the selected elements will be transferred. + * + * This can be used to implement partial I/O, including: + * \li Sub‐setting ‐ reading part of a large dataset + * \li Sampling ‐ reading selected elements (for example, every second element) of a dataset + * \li Scatter‐gather ‐ read non‐contiguous elements into contiguous locations (gather) or read contiguous + * elements into non‐contiguous locations (scatter) or both + * + * To use selections, the following steps are followed: + * \li 1. Get or define the dataspace for the source and destination + * \li 2. Specify one or more selections for source and destination dataspaces + * \li 3. Transfer data using the dataspaces with selections + * + * A selection is created by applying one or more selections to a dataspace. 
A selection may override any + * other selections (#H5S_SELECT_SET) or may be “Ored” with previous selections on the same dataspace + * (#H5S_SELECT_OR). In the latter case, the resulting selection is the union of the selection and all + * previously selected selections. Arbitrary sets of points from a dataspace can be selected by specifying + * an appropriate set of selections. + * + * Two selections are used in data transfer, so the source and destination must be compatible, as described + * below. + * + * There are two forms of selection, hyperslab and point. A selection must be either a point selection or a + * set of hyperslab selections. Selections cannot be mixed. + * + * The definition of a selection within a dataspace, not the data in the selection, cannot be saved to the + * file unless the selection definition is saved as a region reference. For more information, + * see \ref subsec_dataspace_refer. + * + *

    Hyperslab Selection

    + * + * A hyperslab is a selection of elements from a hyper rectangle. An HDF5 hyperslab is a rectangular pattern + * defined by four arrays. The four arrays are summarized in the table below. + * + * The offset defines the origin of the hyperslab in the original dataspace. + * + * The stride is the number of elements to increment between selected elements. A stride of ‘1’ is every + * element, a stride of ‘2’ is every second element, etc. Note that there may be a different stride for + * each dimen‐sion of the dataspace. The default stride is 1. + * + * The count is the number of elements in the hyperslab selection. When the stride is 1, the selection is a + * hyper rectangle with a corner at the offset and size count[0] by count[1] by.... When stride is greater + * than one, the hyperslab bounded by the offset and the corners defined by stride[n] * count[n]. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Hyperslab elements
    + * Parameter + * + * Description + *
    + * Offset + * + * The starting location for the hyperslab. + *
    + * Stride + * + * The number of elements to separate each element or block to be selected. + *
    + * Count + * + * The number of elements or blocks to select along each dimension. + *
    + * Block + * + * The size of the block selected from the dataspace. + *
    + * + * The block is a count on the number of repetitions of the hyperslab. The default block size is '1', which is + * one hyperslab. A block of 2 would be two hyperslabs in that dimension, with the second starting at + * offset[n] + (count[n] * stride[n]) + 1. + * + * A hyperslab can be used to access a sub‐set of a large dataset. The figure below shows an example of a + * hyperslab that reads a rectangle from the middle of a larger two dimensional array. The destination is the + * same shape as the source. + * + * + * + * + * + *
    + * \image html Dspace_subset.gif "Access a sub‐set of data with a hyperslab" + *
    + * + * Hyperslabs can be combined to select complex regions of the source and destination. The figure below + * shows an example of a transfer from one non‐rectangular region into another non‐rectangular region. The + * source is defined as the union of two hyperslabs, and the destination is the union of three hyperslabs. + * + * + * + * + * + *
    + * \image html Dspace_complex.gif "Build complex regions with hyperslab unions" + *
    + * + * Hyperslabs may also be used to collect or scatter data from regular patterns. The figure below shows an + * example where the source is a repeating pattern of blocks, and the destination is a single, one dimensional + * array. + * + * + * + * + * + *
    + * \image html Dspace_combine.gif "Use hyperslabs to combine or disperse data" + *
    + * + *

    Select Points

    + * + * The second type of selection is an array of points such as coordinates. Essentially, this selection is a + * list of all the points to include. The figure below shows an example of a transfer of seven elements from + * a two dimensional dataspace to a three dimensional dataspace using a point selection to specify the points. + * + * + * + * + * + *
    + * \image html Dspace_point.gif "Point selection" + *
    + * + *

    Rules for Defining Selections

    + * + * A selection must have the same number of dimensions (rank) as the dataspace it is applied to, although it + * may select from only a small region such as a plane from a 3D dataspace. Selections do not affect the + * extent of the dataspace, the selection may be larger than the dataspace. The boundaries of selections are + * reconciled with the extent at the time of the data transfer. + * + *

    Data Transfer with Selections

    + * + * A data transfer (read or write) with selections is the same as any read or write, except the source + * and destination dataspace have compatible selections. + * + * During the data transfer, the following steps are executed by the library: + * \li The source and destination dataspaces are checked to assure that the selections are compatible. + *
    • Each selection must be within the current extent of the dataspace. A selection may be + * defined to extend outside the current extent of the dataspace, but the dataspace cannot be + * accessed if the selection is not valid at the time of the access.
    • + *
    • The total number of points selected in the source and destination must be the same. Note + * that the dimensionality of the source and destination can be different (for example, the + * source could be 2D, the destination 1D or 3D), and the shape can be different, but the number of + * elements selected must be the same.
    + * \li The data is transferred, element by element. + * + * Selections have an iteration order for the points selected, which can be any permutation of the dimensions + * involved (defaulting to 'C' array order) or a specific order for the selected points, for selections + * composed of single array elements with \ref H5Sselect_elements. + * + * The elements of the selections are transferred in row‐major, or C order. That is, it is assumed that the + * first dimension varies slowest, the second next slowest, and so forth. For hyperslab selections, the order + * can be any permutation of the dimensions involved (defaulting to ‘C’ array order). When multiple hyperslabs + * are combined, the hyperslabs are coalesced into contiguous reads and writes. + * + * In the case of point selections, the points are read and written in the order specified. + * + * \subsubsection subsubsec_dataspace_transfer_model Programming Model + * + *

    Selecting Hyperslabs

    + * + * Suppose we want to read a 3x4 hyperslab from a dataset in a file beginning at the element <1,2> in the + * dataset, and read it into a 7 x 7 x 3 array in memory. See the figure below. In order to do this, we must + * create a dataspace that describes the overall rank and dimensions of the dataset in the file as well as + * the position and size of the hyperslab that we are extracting from that dataset. + * + * + * + * + * + *
    + * \image html Dspace_select.gif "Selecting a hyperslab" + *
    + * + * The code in the first example below illustrates the selection of the hyperslab in the file dataspace. + * The second example below shows the definition of the destination dataspace in memory. Since the in‐memory + * dataspace has three dimensions, the hyperslab is an array with three dimensions with the last dimension + * being 1: <3,4,1>. The third example below shows the read using the source and destination dataspaces + * with selections. + * + * Selecting a hyperslab + * \code + * //get the file dataspace. + * dataspace = H5Dget_space(dataset); // dataspace identifier + * + * // Define hyperslab in the dataset. + * offset[0] = 1; + * offset[1] = 2; + * count[0] = 3; + * count[1] = 4; + * status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); + * \endcode + * + * Defining the destination memory + * \code + * // Define memory dataspace. + * dimsm[0] = 7; + * dimsm[1] = 7; + * dimsm[2] = 3; + * memspace = H5Screate_simple(3,dimsm,NULL); + * + * // Define memory hyperslab. + * offset_out[0] = 3; + * offset_out[1] = 0; + * offset_out[2] = 0; + * count_out[0] = 3; + * count_out[1] = 4; + * count_out[2] = 1; + * status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL); + * \endcode + * + * A sample read specifying source and destination dataspaces + * \code + * ret = H5Dread(dataset, H5T_NATIVE_INT, memspace,dataspace, H5P_DEFAULT, data); + * \endcode + * + *

    Example with Strides and Blocks

    + * + * Consider an 8 x 12 dataspace into which we want to write eight 3 x 2 blocks in a two dimensional array + * from a source dataspace in memory that is a 50‐element one dimensional array. See the figure below. + * + * + * + * + * + *
    + * \image html Dspace_write1to2.gif "Write from a one dimensional array to a two dimensional array" + *
    + * + * The example below shows code to write 48 elements from the one dimensional array to the file dataset + * starting with the second element in vector. The destination hyperslab has the following parameters: + * offset=(0,1), stride=(4,3), count=(2,4), block=(3,2). The source has the parameters: offset=(1), + * stride=(1), count=(48), block=(1). After these operations, the file dataspace will have the values + * shown in item b in the figure above. Notice that the values are inserted in the file dataset in + * row‐major order. + * + * Write from a one dimensional array to a two dimensional array + * \code + * // Select hyperslab for the dataset in the file, using 3 x 2 blocks, (4,3) stride (2,4) + * // count starting at the position (0,1). + * offset[0] = 0; offset[1] = 1; + * stride[0] = 4; stride[1] = 3; + * count[0] = 2; count[1] = 4; + * block[0] = 3; block[1] = 2; + * ret = H5Sselect_hyperslab(fid, H5S_SELECT_SET, offset, stride, count, block); + * + * // Create dataspace for the first dataset. + * mid1 = H5Screate_simple(MSPACE1_RANK, dim1, NULL); + * + * // Select hyperslab. + * // We will use 48 elements of the vector buffer starting + * // at the second element. Selected elements are + * // 1 2 3 . . . 48 + * offset[0] = 1; + * stride[0] = 1; + * count[0] = 48; + * block[0] = 1; + * ret = H5Sselect_hyperslab(mid1, H5S_SELECT_SET, offset, stride, count, block); + * + * // Write selection from the vector buffer to the dataset in the file. + * ret = H5Dwrite(dataset, H5T_NATIVE_INT, mid1, fid, H5P_DEFAULT, vector); + * \endcode + * + *

    Selecting a Union of Hyperslabs

    + * + * The HDF5 Library allows the user to select a union of hyperslabs and write or read the selection into + * another selection. The shapes of the two selections may differ, but the number of elements must be + * equal. + * + * + * + * + * + *
    + * \image html Dspace_transfer.gif "Transferring hyperslab unions" + *
    + * + * The figure above shows the transfer of a selection that is two overlapping hyperslabs from the dataset + * into a union of hyperslabs in the memory dataset. Note that the destination dataset has a different shape + * from the source dataset. Similarly, the selection in the memory dataset could have a different shape than + * the selected union of hyperslabs in the original file. For simplicity, the selection is that same shape + * at the destination. + * + * To implement this transfer, it is necessary to: + * \li 1. Get the source dataspace + * \li 2. Define one hyperslab selection for the source + * \li 3. Define a second hyperslab selection, unioned with the first + * \li 4. Get the destination dataspace + * \li 5. Define one hyperslab selection for the destination + * \li 6. Define a second hyperslab selection, unioned with the first + * \li 7. Execute the data transfer (H5Dread or H5Dwrite) using the source and destination dataspaces + * + * The example below shows example code to create the selections for the source dataspace (the file). The + * first hyperslab is size 3 x 4 and the left upper corner at the position (1,2). The hyperslab is a simple + * rectangle, so the stride and block are 1. The second hyperslab is 6 x 5 at the position (2,4). The second + * selection is a union with the first hyperslab (#H5S_SELECT_OR). + * + * Select source hyperslabs + * \code + * fid = H5Dget_space(dataset); + * + * // Select first hyperslab for the dataset in the file. + * offset[0] = 1; offset[1] = 2; + * block[0] = 1; block[1] = 1; + * stride[0] = 1; stride[1] = 1; + * count[0] = 3; count[1] = 4; + * ret = H5Sselect_hyperslab(fid, H5S_SELECT_SET, offset, stride, count, block); + * + * // Add second selected hyperslab to the selection. 
+ * offset[0] = 2; offset[1] = 4; + * block[0] = 1; block[1] = 1; + * stride[0] = 1; stride[1] = 1; + * count[0] = 6; count[1] = 5; + * ret = H5Sselect_hyperslab(fid, H5S_SELECT_OR, offset, stride, count, block); + * \endcode + * + * The example below shows example code to create the selection for the destination in memory. The steps + * are similar. In this example, the hyperslabs are the same shape, but located in different positions in the + * dataspace. The first hyperslab is 3 x 4 and starts at (0,0), and the second is 6 x 5 and starts at (1,2). + * Finally, the H5Dread call transfers the selected data from the file dataspace to the selection in memory. + * In this example, the source and destination selections are two overlapping rectangles. In general, any + * number of rectangles can be OR’ed, and they do not have to be contiguous. The order of the selections + * does not matter, but the first should use #H5S_SELECT_SET; subsequent selections are unioned using + * #H5S_SELECT_OR. + * + * It is important to emphasize that the source and destination do not have to be the same shape (or number + * of rectangles). As long as the two selections have the same number of elements, the data can be + * transferred. + * + * Select destination hyperslabs + * \code + * // Create memory dataspace. + * mid = H5Screate_simple(MSPACE_RANK, mdim, NULL); + * + * // Select two hyperslabs in memory. Hyperslabs have the + * // same size and shape as the selected hyperslabs for + * // the file dataspace. 
+ * offset[0] = 0; offset[1] = 0; + * block[0] = 1; block[1] = 1; + * stride[0] = 1; stride[1] = 1; + * count[0] = 3; count[1] = 4; + * ret = H5Sselect_hyperslab(mid, H5S_SELECT_SET, offset, stride, count, block); + * + * offset[0] = 1; offset[1] = 2; + * block[0] = 1; block[1] = 1; + * stride[0] = 1; stride[1] = 1; + * count[0] = 6; count[1] = 5; + * ret = H5Sselect_hyperslab(mid, H5S_SELECT_OR, offset, stride, count, block); + * + * ret = H5Dread(dataset, H5T_NATIVE_INT, mid, fid, H5P_DEFAULT, matrix_out); + * \endcode + * + *

    Selecting a List of Independent Points

    + * + * It is also possible to specify a list of elements to read or write using the function H5Sselect_elements. + * + * The procedure is similar to hyperslab selections. + * \li 1. Get the source dataspace + * \li 2. Set the selected points + * \li 3. Get the destination dataspace + * \li 4. Set the selected points + * \li 5. Transfer the data using the source and destination dataspaces + * + * The figure below shows an example where four values are to be written to four separate points in a two + * dimensional dataspace. The source dataspace is a one dimensional array with the values 53, 59, 61, 67. + * The destination dataspace is an 8 x 12 array. The elements are to be written to the points + * (0,0), (3,3), (3,5), and (5,6). In this example, the source does not require a selection. The example + * below the figure shows example code to implement this transfer. + * + * A point selection lists the exact points to be transferred and the order they will be transferred. The + * source and destination are required to have the same number of elements. A point selection can be used + * with a hyperslab (for example, the source could be a point selection and the destination a hyperslab, + * or vice versa), so long as the number of elements selected are the same. + * + * + * + * + * + *
    + * \image html Dspace_separate.gif "Write data to separate points" + *
    + * + * Write data to separate points + * \code + * hsize_t dim2[] = {4}; + * int values[] = {53, 59, 61, 67}; + * + * // file dataspace + * hssize_t coord[4][2]; + * + * // Create dataspace for the second dataset. + * mid2 = H5Screate_simple(1, dim2, NULL); + * + * // Select sequence of NPOINTS points in the file dataspace. + * coord[0][0] = 0; coord[0][1] = 0; + * coord[1][0] = 3; coord[1][1] = 3; + * coord[2][0] = 3; coord[2][1] = 5; + * coord[3][0] = 5; coord[3][1] = 6; + * + * ret = H5Sselect_elements(fid, H5S_SELECT_SET, NPOINTS, (const hssize_t **)coord); + * + * ret = H5Dwrite(dataset, H5T_NATIVE_INT, mid2, fid, H5P_DEFAULT, values); + * \endcode + * + *

    Combinations of Selections

    + * + * Selections are a very flexible mechanism for reorganizing data during a data transfer. With different + * combinations of dataspaces and selections, it is possible to implement many kinds of data transfers + * including sub‐setting, sampling, and reorganizing the data. The table below gives some example combinations + * of source and destination, and the operations they implement. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Selection operations
    + *

    Source

    + *
    + *

    Destination

    + *
    + *

    Operation

    + *
    + *

    All

    + *
    + *

    All

    + *
    + *

    Copy whole array

    + *
    + *

    All

    + *
    + *

    All (different shape)

    + *
    + *

    Copy and reorganize array

    + *
    + *

    Hyperslab

    + *
    + *

    All

    + *
    + *

    Sub-set

    + *
    + *

    Hyperslab

    + *
    + *

    Hyperslab (same shape)

    + *
    + *

    Selection

    + *
    + *

    Hyperslab

    + *
    + *

    Hyperslab (different shape)

    + *
    + *

    Select and rearrange

    + *
    + *

    Hyperslab with stride or block

    + *
    + *

    All or hyperslab with stride 1

    + *
    + *

    Sub-sample, scatter

    + *
    + *

    Hyperslab

    + *
    + *

    Points

    + *
    + *

    Scatter

    + *
    + *

    Points

    + *
    + *

    Hyperslab or all

    + *
    + *

    Gather

    + *
    + *

    Points

    + *
    + *

    Points (same)

    + *
    + *

    Selection

    + *
    + *

    Points

    + *
    + *

    Points (different)

    + *
    + *

    Reorder points

    + *
    + * + * \subsection subsec_dataspace_select Dataspace Selection Operations and Data Transfer + * + * This section is under construction. + * + * \subsection subsec_dataspace_refer References to Dataset Regions + * + * Another use of selections is to store a reference to a region of a dataset. An HDF5 object reference + * object is a pointer to an object (dataset, group, or committed datatype) in the file. A selection can + * be used to create a pointer to a set of selected elements of a dataset, called a region reference. The + * selection can be either a point selection or a hyperslab selection. + * + * A region reference is an object maintained by the HDF5 Library. The region reference can be stored in a + * dataset or attribute, and then read. The dataset or attribute is defined to have the special datatype, + * #H5T_STD_REF_DSETREG. + * + * To discover the elements and/or read the data, the region reference can be dereferenced. The + * #H5Rdereference call returns an identifier for the dataset, and then the selected dataspace can be + * retrieved with a call to #H5Rget_region(). The selected dataspace can be used to read the selected data + * elements. + * + * For more information, \see subsubsec_datatype_other_refs. + * + * \subsubsection subsubsec_dataspace_refer_use Example Uses for Region References + * + * Region references are used to implement stored pointers to data within a dataset. For example, features + * in a large dataset might be indexed by a table. See the figure below. This table could be stored as an + * HDF5 dataset with a compound datatype, for example, with a field for the name of the feature and a region + * reference to point to the feature in the dataset. See the second figure below. + * + * + * + * + * + *
    + * \image html Dspace_features.gif " Features indexed by a table" + *
    + * + * + * + * + * + *
    + * \image html Dspace_features_cmpd.gif "Storing the table with a compound datatype" + *
    + * + * + * \subsubsection subsubsec_dataspace_refer_create Creating References to Regions + * + * To create a region reference: + * \li 1. Create or open the dataset that contains the region + * \li 2. Get the dataspace for the dataset + * \li 3. Define a selection that specifies the region + * \li 4. Create a region reference using the dataset and dataspace with selection + * \li 5. Write the region reference(s) to the desired dataset or attribute + * + * The figure below shows a diagram of a file with three datasets. Dataset D1 and D2 are two dimensional + * arrays of integers. Dataset R1 is a one dimensional array of references to regions in D1 and D2. The + * regions can be any valid selection of the dataspace of the target dataset. + * + * + * + * + *
    + * \image html Dspace_three_datasets.gif "A file with three datasets" + *
    + * Note: In the figure above, R1 is a 1 D array of region pointers; each pointer refers to a selection + * in one dataset. + * + * The example below shows code to create the array of region references. The references are created in an + * array of type #hdset_reg_ref_t. Each region is defined as a selection on the dataspace of the dataset, + * and a reference is created using \ref H5Rcreate(). The call to \ref H5Rcreate() specifies the file, + * dataset, and the dataspace with selection. + * + * Create an array of region references + * \code + * // create an array of 4 region references + * hdset_reg_ref_t ref[4]; + * + * // Create a reference to the first hyperslab in the first Dataset. + * offset[0] = 1; offset[1] = 1; + * count[0] = 3; count[1] = 2; + * status = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, offset, NULL, count, NULL); + * status = H5Rcreate(&ref[0], file_id, "D1", H5R_DATASET_REGION, space_id); + * + * // The second reference is to a union of hyperslabs in the first Dataset + * offset[0] = 5; offset[1] = 3; + * count[0] = 1; count[1] = 4; + * status = H5Sselect_none(space_id); + * status = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, offset, NULL, count, NULL); + * offset[0] = 6; offset[1] = 5; + * count[0] = 1; count[1] = 2; + * status = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, offset, NULL, count, NULL); + * status = H5Rcreate(&ref[1], file_id, "D1", H5R_DATASET_REGION, space_id); + * + * // the fourth reference is to a selection of points in the first Dataset + * status = H5Sselect_none(space_id); + * coord[0][0] = 4; coord[0][1] = 4; + * coord[1][0] = 2; coord[1][1] = 6; + * coord[2][0] = 3; coord[2][1] = 7; + * coord[3][0] = 1; coord[3][1] = 5; + * coord[4][0] = 5; coord[4][1] = 8; + * + * status = H5Sselect_elements(space_id, H5S_SELECT_SET, num_points, (const hssize_t **)coord); + * status = H5Rcreate(&ref[3], file_id, "D1", H5R_DATASET_REGION, space_id); + * + * // the third reference is to a hyperslab in the second Dataset + * 
offset[0] = 0; offset[1] = 0; + * count[0] = 4; count[1] = 6; + * status = H5Sselect_hyperslab(space_id2, H5S_SELECT_SET, offset, NULL, count, NULL); + * status = H5Rcreate(&ref[2], file_id, "D2", H5R_DATASET_REGION, space_id2); + * \endcode + * + * When all the references are created, the array of references is written to the dataset R1. The + * dataset is declared to have datatype #H5T_STD_REF_DSETREG. See the example below. + * + * Write the array of references to a dataset + * \code + * hsize_t dimsr[1]; + * dimsr[0] = 4; + * + * // Dataset with references. + * spacer_id = H5Screate_simple(1, dimsr, NULL); + * dsetr_id = H5Dcreate(file_id, "R1", H5T_STD_REF_DSETREG, spacer_id, H5P_DEFAULT, H5P_DEFAULT, + * H5P_DEFAULT); + * + * // Write dataset with the references. + * status = H5Dwrite(dsetr_id, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref); + * + * \endcode + * + * When creating region references, the following rules are enforced. + * \li The selection must be a valid selection for the target dataset, just as when transferring data + * \li The dataset must exist in the file when the reference is created; #H5Rcreate + * \li The target dataset must be in the same file as the stored reference + * + * \subsubsection subsubsec_dataspace_refer_read Reading References to Regions + * + * To retrieve data from a region reference, the reference must be read from the file, and then the data can + * be retrieved. The steps are: + * \li 1. Open the dataset or attribute containing the reference objects + * \li 2. Read the reference object(s) + * \li 3. For each region reference, get the dataset (#H5Rdereference) and dataspace (#H5Rget_region) + * \li 4. 
Use the dataspace and datatype to discover what space is needed to store the data, allocate the + * correct storage and create a dataspace and datatype to define the memory data layout + * + * The example below shows code to read an array of region references from a dataset, and then read the + * data from the first selected region. Note that the region reference has information that records the + * dataset (within the file) and the selection on the dataspace of the dataset. After dereferencing the + * region reference, the datatype, number of points, and some aspects of the selection can be discovered. + * (For a union of hyperslabs, it may not be possible to determine the exact set of hyperslabs that has been + * combined.) + * The table below the code example shows the inquiry functions. + * + * When reading data from a region reference, the following rules are enforced: + * \li The target dataset must be present and accessible in the file + * \li The selection must be a valid selection for the dataset + * + * Read an array of region references; read from the first selection + * \code + * dsetr_id = H5Dopen (file_id, "R1", H5P_DEFAULT); + * status = H5Dread(dsetr_id, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_out); + * + * // Dereference the first reference. + * // 1) get the dataset (H5Rdereference) + * // 2) get the selected dataspace (H5Rget_region) + * + * dsetv_id = H5Rdereference(dsetr_id, H5R_DATASET_REGION, &ref_out[0]); + * space_id = H5Rget_region(dsetr_id, H5R_DATASET_REGION, &ref_out[0]); + * + * // Discover how many points and shape of the data + * ndims = H5Sget_simple_extent_ndims(space_id); + * H5Sget_simple_extent_dims(space_id,dimsx,NULL); + * + * // Read and display hyperslab selection from the dataset. 
+ * dimsy[0] = H5Sget_select_npoints(space_id); + * spacex_id = H5Screate_simple(1, dimsy, NULL); + * + * status = H5Dread(dsetv_id, H5T_NATIVE_INT, H5S_ALL, space_id, H5P_DEFAULT, data_out); + * printf("Selected hyperslab: "); + * for (i = 0; i < 8; i++) { + * printf("\n"); + * for (j = 0; j < 10; j++) + * printf("%d ", data_out[i][j]); + * } + * printf("\n"); + * \endcode + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    The inquiry functions
    + *

    Function

    + *
    + *

    Information

    + *
    + * @ref H5Sget_select_npoints + * + *

    The number of elements in the selection (hyperslab or point selection).

    + *
    + * @ref H5Sget_select_bounds + * + *

    The bounding box that encloses the selected points (hyperslab or point selection).

    + *
    + * @ref H5Sget_select_hyper_nblocks + * + *

    The number of blocks in the selection.

    + *
    + * @ref H5Sget_select_hyper_blocklist + * + *

    A list of the blocks in the selection.

    + *
    + * @ref H5Sget_select_elem_npoints + * + *

    The number of points in the selection.

    + *
    + * @ref H5Sget_select_elem_pointlist + * + *

    The points.

    + *
    + * + * + * \subsection subsec_dataspace_sample Sample Programs + * + * This section contains the full programs from which several of the code examples in this chapter were + * derived. The h5dump output from the program’s output file immediately follows each program. + * + * h5_write.c + * \code + * #include "hdf5.h" + * + * #define H5FILE_NAME "SDS.h5" + * #define DATASETNAME "C Matrix" + * #define NX 3 + * #define NY 5 + * #define RANK 2 // dataset dimensions + * + * int + * main (void) + * { + * hid_t file, dataset; // file and dataset identifiers + * hid_t datatype, dataspace; // identifiers + * hsize_t dims[2]; // dataset dimensions + * herr_t status; + * int data[NX][NY]; // data to write + * int i, j; + * + * // + * // Data and output buffer initialization. + * for (j = 0; j < NX; j++) { + * for (i = 0; i < NY; i++) + * data[j][i] = i + 1 + j*NY; + * } + * // 1 2 3 4 5 + * // 6 7 8 9 10 + * // 11 12 13 14 15 + * + * // Create a new file using H5F_ACC_TRUNC access, + * // default file creation properties, and default file + * // access properties. + * file = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + * + * // Describe the size of the array and create the data space for fixed + * // size dataset. + * dims[0] = NX; + * dims[1] = NY; + * dataspace = H5Screate_simple(RANK, dims, NULL); + * + * // Create a new dataset within the file using defined dataspace and + * // datatype and default dataset creation properties. + * dataset = H5Dcreate(file, DATASETNAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, + * H5P_DEFAULT, H5P_DEFAULT); + * + * // Write the data to the dataset using default transfer properties. + * status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + * + * // Close/release resources. 
+ * H5Sclose(dataspace); + * H5Dclose(dataset); + * H5Fclose(file); + * + * return 0; + * } + * + * SDS.out + * ------- + * HDF5 "SDS.h5" { + * GROUP "/" { + * DATASET "C Matrix" { + * DATATYPE H5T_STD_I32BE + * DATASPACE SIMPLE { ( 3, 5 ) / ( 3, 5 ) } + * DATA { + * 1, 2, 3, 4, 5, + * 6, 7, 8, 9, 10, + * 11, 12, 13, 14, 15 + * } + * } + * } + * } + * + * \endcode + * + * h5_write.f90 + * \code + * ---------- + * PROGRAM DSETEXAMPLE + * + * USE HDF5 ! This module contains all necessary modules + * + * IMPLICIT NONE + * + * CHARACTER(LEN=7), PARAMETER :: filename = "SDSf.h5" ! File name + * CHARACTER(LEN=14), PARAMETER :: dsetname = "Fortran Matrix" ! Dataset name + * INTEGER, PARAMETER :: NX = 3 + * INTEGER, PARAMETER :: NY = 5 + * + * INTEGER(HID_T) :: file_id ! File identifier + * INTEGER(HID_T) :: dset_id ! Dataset identifier + * INTEGER(HID_T) :: dspace_id ! Dataspace identifier + * + * INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/3,5/) ! Dataset dimensions + * INTEGER :: rank = 2 ! Dataset rank + * INTEGER :: data(NX,NY) + * INTEGER :: error ! Error flag + * INTEGER :: i, j + * + * ! + * ! Initialize data + * ! + * do i = 1, NX + * do j = 1, NY + * data(i,j) = j + (i-1)*NY + * enddo + * enddo + * ! + * ! Data + * ! + * ! 1 2 3 4 5 + * ! 6 7 8 9 10 + * ! 11 12 13 14 15 + * + * ! + * ! Initialize FORTRAN interface. + * ! + * CALL h5open_f(error) + * + * ! + * ! Create a new file using default properties. + * ! + * CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) + * + * ! + * ! Create the dataspace. + * ! + * CALL h5screate_simple_f(rank, dims, dspace_id, error) + * + * ! + * ! Create and write dataset using default properties. + * ! + * CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, dspace_id, & + * dset_id, error, H5P_DEFAULT_F, H5P_DEFAULT_F, & + * H5P_DEFAULT_F) + * + * CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, dims, error) + * + * ! + * ! End access to the dataset and release resources used by it. + * ! 
+ * CALL h5dclose_f(dset_id, error) + * + * ! + * ! Terminate access to the data space. + * ! + * CALL h5sclose_f(dspace_id, error) + * + * ! + * ! Close the file. + * ! + * CALL h5fclose_f(file_id, error) + * + * ! + * ! Close FORTRAN interface. + * ! + * CALL h5close_f(error) + * + * END PROGRAM DSETEXAMPLE + * + * SDSf.out + * -------- + * HDF5 "SDSf.h5" { + * GROUP "/" { + * DATASET "Fortran Matrix" { + * DATATYPE H5T_STD_I32BE + * DATASPACE SIMPLE { ( 5, 3 ) / ( 5, 3 ) } + * DATA { + * 1, 6, 11, + * 2, 7, 12, + * 3, 8, 13, + * 4, 9, 14, + * 5, 10, 15 + * } + * } + * } + * } + * + * \endcode + * + * h5_write_tr.f90 + * \code + * PROGRAM DSETEXAMPLE + * + * USE HDF5 ! This module contains all necessary modules + * + * IMPLICIT NONE + * + * CHARACTER(LEN=10), PARAMETER :: filename = "SDSf_tr.h5" ! File name + * CHARACTER(LEN=24), PARAMETER :: dsetname = "Fortran Transpose Matrix"! Dataset name + * + * INTEGER, PARAMETER :: NX = 3 + * INTEGER, PARAMETER :: NY = 5 + * + * INTEGER(HID_T) :: file_id ! File identifier + * INTEGER(HID_T) :: dset_id ! Dataset identifier + * INTEGER(HID_T) :: dspace_id ! Dataspace identifier + * + * INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/NY, NX/) ! Dataset dimensions + * INTEGER :: rank = 2 ! Dataset rank + * INTEGER :: data(NY,NX) + * + * INTEGER :: error ! Error flag + * INTEGER :: i, j + * + * ! + * ! Initialize data + * ! + * do i = 1, NY + * do j = 1, NX + * data(i,j) = i + (j-1)*NY + * enddo + * enddo + * + * ! + * ! Data + * ! + * ! 1 6 11 + * ! 2 7 12 + * ! 3 8 13 + * ! 4 9 14 + * ! 5 10 15 + * + * ! + * ! Initialize FORTRAN interface. + * ! + * CALL h5open_f(error) + * + * ! + * ! Create a new file using default properties. + * ! + * CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) + * + * ! + * ! Create the dataspace. + * ! + * CALL h5screate_simple_f(rank, dims, dspace_id, error) + * + * ! + * ! Create and write dataset using default properties. + * ! 
+ * CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, dspace_id, & + * dset_id, error, H5P_DEFAULT_F, H5P_DEFAULT_F, & + * H5P_DEFAULT_F) + * CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, dims, error) + * + * ! + * ! End access to the dataset and release resources used by it. + * ! + * CALL h5dclose_f(dset_id, error) + * + * ! + * ! Terminate access to the data space. + * ! + * CALL h5sclose_f(dspace_id, error) + * + * ! + * ! Close the file. + * ! + * CALL h5fclose_f(file_id, error) + * + * ! + * ! Close FORTRAN interface. + * ! + * CALL h5close_f(error) + * + * END PROGRAM DSETEXAMPLE + * + * SDSf_tr.out + * ----------- + * HDF5 "SDSf_tr.h5" { + * GROUP "/" { + * DATASET "Fortran Transpose Matrix" { + * DATATYPE H5T_STD_I32LE + * DATASPACE SIMPLE { ( 3, 5 ) / ( 3, 5 ) } + * DATA { + * 1, 2, 3, 4, 5, + * 6, 7, 8, 9, 10, + * 11, 12, 13, 14, 15 + * } + * } + * } + * } + * + * \endcode + * + * Previous Chapter \ref sec_datatype - Next Chapter \ref sec_attribute + * + */ + +/** + * \defgroup H5S Dataspaces (H5S) * * Use the functions in this module to manage HDF5 dataspaces \Emph{and} selections. * @@ -40,6 +1527,7 @@ * using \Emph{selections}. Furthermore, certain set operations are supported * for selections. * + * */ #endif /* H5Smodule_H */ diff --git a/src/H5Tmodule.h b/src/H5Tmodule.h index 8f7d04d..f631007 100644 --- a/src/H5Tmodule.h +++ b/src/H5Tmodule.h @@ -28,7 +28,3837 @@ #define H5_MY_PKG H5T #define H5_MY_PKG_ERR H5E_DATATYPE -/**\defgroup H5T H5T +/** \page H5T_UG HDF5 Datatypes + * + * \section sec_datatype HDF5 Datatypes + * HDF5 datatypes describe the element type of HDF5 datasets and attributes. + * There's a large set of predefined datatypes, but users may find it useful + * to define new datatypes through a process called \Emph{derivation}. + * + * The element type is automatically persisted as part of the HDF5 metadata of + * attributes and datasets. 
Additionally, datatype definitions can be persisted + * to HDF5 files and linked to groups as HDF5 datatype objects or so-called + * \Emph{committed datatypes}. + * + * \subsection subsec_datatype_intro Introduction and Definitions + * + * An HDF5 dataset is an array of data elements, arranged according to the specifications + * of the dataspace. In general, a data element is the smallest addressable unit of storage + * in the HDF5 file. (Compound datatypes are the exception to this rule.) The HDF5 datatype + * defines the storage format for a single data element. See the figure below. + * + * The model for HDF5 attributes is extremely similar to datasets: an attribute has a dataspace + * and a data type, as shown in the figure below. The information in this chapter applies to both + * datasets and attributes. + * + * + * + * + * + *
    + * \image html Dtypes_fig1.gif "Datatypes, dataspaces, and datasets" + *
    + * + * Abstractly, each data element within the dataset is a sequence of bits, interpreted as a single + * value from a set of values (for example, a number or a character). For a given datatype, there is a + * standard or convention for representing the values as bits, and when the bits are represented in a + * particular storage the bits are laid out in a specific storage scheme such as 8-bit bytes with a + * specific ordering and alignment of bytes within the storage array. + * + * HDF5 datatypes implement a flexible, extensible, and portable mechanism for specifying and + * discovering the storage layout of the data elements, determining how to interpret the elements + * (for example, as floating point numbers), and for transferring data from different compatible + * layouts. + * + * An HDF5 datatype describes one specific layout of bits. A dataset has a single datatype which + * applies to every data element. When a dataset is created, the storage datatype is defined. After + * the dataset or attribute is created, the datatype cannot be changed. + * \li The datatype describes the storage layout of a single data element + * \li All elements of the dataset must have the same type + * \li The datatype of a dataset is immutable + * + * When data is transferred (for example, a read or write), each end point of the transfer has a + * datatype, which describes the correct storage for the elements. The source and destination may + * have different (but compatible) layouts, in which case the data elements are automatically + * transformed during the transfer. + * + * HDF5 datatypes describe commonly used binary formats for numbers (integers + * and floating point) and characters (ASCII). A given computing architecture and programming language + * supports certain number and character representations. For example, a computer may support 8-, + * 16-, 32-, and 64-bit signed integers, stored in memory in little-endian byte order. 
These would + * presumably correspond to the C programming language types \Emph{char}, \Emph{short}, + * \Emph{int}, and \Emph{long}. + * + * When reading and writing from memory, the HDF5 library must know the appropriate datatype + * that describes the architecture specific layout. The HDF5 library provides the platform + * independent \Emph{NATIVE} types, which are mapped to an appropriate datatype for each platform. + * So the type #H5T_NATIVE_INT is an alias for the appropriate descriptor for each platform. + * + * Data in memory has a datatype: + * \li The storage layout in memory is architecture-specific + * \li The HDF5 \Emph{NATIVE} types are predefined aliases for the architecture-specific memory layout + * \li The memory datatype need not be the same as the stored datatype of the dataset + * + * In addition to numbers and characters, an HDF5 datatype can describe more abstract classes of + * types including enumerations, strings, bit strings, and references (pointers to objects in the HDF5 + * file). HDF5 supports several classes of composite datatypes which are combinations of one or + * more other datatypes. In addition to the standard predefined datatypes, users can define new + * datatypes within the datatype classes. + * + * The HDF5 datatype model is very general and flexible: + * \li For common simple purposes, only predefined types will be needed + * \li Datatypes can be combined to create complex structured datatypes + * \li If needed, users can define custom atomic datatypes + * \li Committed datatypes can be shared by datasets or attributes + * + * \subsection subsec_datatype_model Datatype Model + * The HDF5 library implements an object-oriented model of datatypes. HDF5 datatypes are + * organized as a logical set of base types, or datatype classes. Each datatype class defines + * a format for representing logical values as a sequence of bits. 
For example the #H5T_INTEGER + * class is a format for representing twos complement integers of various sizes. + * + * A datatype class is defined as a set of one or more datatype properties. A datatype property is + * a property of the bit string. The datatype properties are defined by the logical model of the + * datatype class. For example, the integer class (twos complement integers) has properties such as + * “signed or unsigned”, “length”, and “byte-order”. The float class (IEEE floating point numbers) + * has these properties, plus “exponent bits”, “exponent sign”, etc. + * + * A datatype is derived from one datatype class: a given datatype has a specific value for the + * datatype properties defined by the class. For example, for 32-bit signed integers, stored + * big-endian, the HDF5 datatype is a sub-type of integer with the properties set to + * signed=1, size=4(bytes), and byte-order=BE. + * + * The HDF5 datatype API (H5T functions) provides methods to create datatypes of different + * datatype classes, to set the datatype properties of a new datatype, and to discover the datatype + * properties of an existing datatype. + * + * The datatype for a dataset is stored in the HDF5 file as part of the metadata for the dataset. + * A datatype can be shared by more than one dataset in the file if the datatype is saved to the + * file with a name. This shareable datatype is known as a committed datatype. In the past, + * this kind of datatype was called a named datatype. + * + * When transferring data (for example, a read or write), the data elements of the source and + * destination storage must have compatible types. As a general rule, data elements with the same + * datatype class are compatible while elements from different datatype classes are not compatible. 
+ * When transferring data of one datatype to another compatible datatype, the HDF5 Library uses + * the datatype properties of the source and destination to automatically transform each data + * element. For example, when reading from data stored as 32-bit signed integers, big + * endian into 32-bit signed integers, little-endian, the HDF5 Library will automatically swap the + * bytes. + * + * Thus, data transfer operations (\ref H5Dread, \ref H5Dwrite, \ref H5Aread, \ref H5Awrite) require + * a datatype for both the source and the destination. + * + * + * + * + * + *
    + * \image html Dtypes_fig2.gif "The datatype model" + *
    + * + * The HDF5 library defines a set of predefined datatypes, corresponding to commonly used + * storage formats, such as twos complement integers, IEEE Floating point numbers, etc., 4- + * and 8-byte sizes, big-endian and little-endian byte orders. In addition, a user can derive types with + * custom values for the properties. For example, a user program may create a datatype to describe + * a 6-bit integer, or a 600-bit floating point number. + * + * In addition to atomic datatypes, the HDF5 library supports composite datatypes. A composite + * datatype is an aggregation of one or more datatypes. Each class of composite datatypes has + * properties that describe the organization of the composite datatype. See the figure below. + * Composite datatypes include: + * \li Compound datatypes: structured records + * \li Array: a multidimensional array of a datatype + * \li Variable-length: a one-dimensional array of a datatype + * + * + * + * + * + *
    + * \image html Dtypes_fig3.gif "Composite datatypes" + *
    + * + * \subsubsection subsubsec_datatype_model_class Datatype Classes and Properties + * The figure below shows the HDF5 datatype classes. Each class is defined to have a set of + * properties which describe the layout of the data element and the interpretation of the bits. The + * table below lists the properties for the datatype classes. + * + * + * + * + * + *
    + * \image html Dtypes_fig4.gif "Datatype classes" + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Datatype classes and their properties
    + * Class + * + * Description + * + * Properties + * + * Notes + *
    + * Integer + * + * Twos complement integers + * + * Size (bytes), precision (bits), offset (bits), pad, byte order, signed/unsigned + * + *
    + * Float + * + * Floating Point numbers + * + * Size (bytes), precision (bits), offset (bits), pad, byte order, sign position, + * exponent position, exponent size (bits), exponent sign, exponent bias, mantissa position, + * mantissa (size) bits, mantissa sign, mantissa normalization, internal padding + * + * See IEEE 754 for a definition of these properties. These properties describe + * non-IEEE 754 floating point formats as well. + *
    + * Character + * + * Array of 1-byte character encoding + * + * Size (characters), Character set, byte order, pad/no pad, pad character + * + * Currently, ASCII and UTF-8 are supported. + *
    + * Bitfield + * + * String of bits + * + * Size (bytes), precision (bits), offset (bits), pad, byte order + * + * A sequence of bit values packed into one or more bytes. + *
    + * Opaque + * + * Uninterpreted data + * + * Size (bytes), precision (bits), offset (bits), pad, byte order, tag + * + * A sequence of bytes, stored and retrieved as a block. + * The ‘tag’ is a string that can be used to label the value. + *
    + * Enumeration + * + * A list of discrete values, with symbolic names in the form of strings. + * + * Number of elements, element names, element values + * + * Enumeration is a list of pairs (name, value). The name is a string; the + * value is an unsigned integer. + *
    + * Reference + * + * Reference to object or region within the HDF5 file + * + * + * + * @see H5R + *
    + * Array + * + * Array (1-4 dimensions) of data elements + * + * Number of dimensions, dimension sizes, base datatype + * + * The array is accessed atomically: no selection or sub-setting. + *
    + * Variable-length + * + * A variable-length 1-dimensional array of data elements + * + * Current size, base type + * + * + *
    + * Compound + * + * A Datatype of a sequence of Datatypes + * + * Number of members, member names, member types, member offset, member class, + * member size, byte order + * + * + *
    + * + * \subsubsection subsubsec_datatype_model_predefine Predefined Datatypes + * The HDF5 library predefines a modest number of commonly used datatypes. These types have + * standard symbolic names of the form H5T_arch_base where arch is an architecture name and + * base is a programming type name Table 2. New types can be derived from the predefined + * types by copying the predefined type \ref H5Tcopy() and then modifying the result. + * + * The base name of most types consists of a letter to indicate the class Table 3, a precision in + * bits, and an indication of the byte order Table 4. + * + * Table 5 shows examples of predefined datatypes. The full list can be found in the + * \ref PDT section of the \ref RM. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 2. Architectures used in predefined datatypes
    + * Architecture Name + * + * Description + *
    + * IEEE + * + * IEEE-754 standard floating point types in various byte orders. + *
    + * STD + * + * This is an architecture that contains semi-standard datatypes like signed + * two’s complement integers, unsigned integers, and bitfields in various + * byte orders. + *
    + * C
    FORTRAN + *
    + * Types which are specific to the C or Fortran programming languages + * are defined in these architectures. For instance, #H5T_C_S1 defines a + * base string type with null termination which can be used to derive string + * types of other lengths. + *
    + * NATIVE + * + * This architecture contains C-like datatypes for the machine on which + * the library was compiled. The types were actually defined by running + * the H5detect program when the library was compiled. In order to be + * portable, applications should almost always use this architecture + * to describe things in memory. + *
    + * CRAY + * + * Cray architectures. These are word-addressable, big-endian systems + * with non-IEEE floating point. + *
    + * INTEL + * + * All Intel and compatible CPU’s. + * These are little-endian systems with IEEE floating-point. + *
    + * MIPS + * + * All MIPS CPU’s commonly used in SGI systems. These are big-endian + * systems with IEEE floating-point. + *
    + * ALPHA + * + * All DEC Alpha CPU’s, little-endian systems with IEEE floating-point. + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 3. Base types
    + * Base + * + * Description + *
    + * B + * + * Bitfield + *
    + * F + * + * Floating point + *
    + * I + * + * Signed integer + *
    + * R + * + * References + *
    + * S + * + * Character string + *
    + * U + * + * Unsigned integer + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 4. Byte order
    + * Order + * + * Description + *
    + * BE + * + * Big-endian + *
    + * LE + * + * Little-endian + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 5. Some predefined datatypes
    + * Example + * + * Description + *
    + * #H5T_IEEE_F64LE + * + * Eight-byte, little-endian, IEEE floating-point + *
    + * #H5T_IEEE_F32BE + * + * Four-byte, big-endian, IEEE floating point + *
    + * #H5T_STD_I32LE + * + * Four-byte, little-endian, signed two’s complement integer + *
    + * #H5T_STD_U16BE + * + * Two-byte, big-endian, unsigned integer + *
+ * #H5T_C_S1 + * + * One-byte, null-terminated string of eight-bit characters + *
    + * #H5T_INTEL_B64 + * + * Eight-byte bit field on an Intel CPU + *
    + * #H5T_STD_REF_OBJ + * + * Reference to an entire object in a file + *
    + * + * The HDF5 library predefines a set of \Emph{NATIVE} datatypes which are similar to C type names. + * The native types are set to be an alias for the appropriate HDF5 datatype for each platform. For + * example, #H5T_NATIVE_INT corresponds to a C int type. On an Intel based PC, this type is the same as + * #H5T_STD_I32LE, while on a MIPS system this would be equivalent to #H5T_STD_I32BE. Table 6 shows + * examples of \Emph{NATIVE} types and corresponding C types for a common 32-bit workstation. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 6. Native and 32-bit C datatypes
    + * Example + * + * Corresponding C Type + *
    + * #H5T_NATIVE_CHAR + * + * char + *
    + * #H5T_NATIVE_SCHAR + * + * signed char + *
    + * #H5T_NATIVE_UCHAR + * + * unsigned char + *
    + * #H5T_NATIVE_SHORT + * + * short + *
    + * #H5T_NATIVE_USHORT + * + * unsigned short + *
    + * #H5T_NATIVE_INT + * + * int + *
    + * #H5T_NATIVE_UINT + * + * unsigned + *
    + * #H5T_NATIVE_LONG + * + * long + *
    + * #H5T_NATIVE_ULONG + * + * unsigned long + *
    + * #H5T_NATIVE_LLONG + * + * long long + *
    + * #H5T_NATIVE_ULLONG + * + * unsigned long long + *
    + * #H5T_NATIVE_FLOAT + * + * float + *
    + * #H5T_NATIVE_DOUBLE + * + * double + *
    + * #H5T_NATIVE_LDOUBLE + * + * long double + *
    + * #H5T_NATIVE_HSIZE + * + * hsize_t + *
    + * #H5T_NATIVE_HSSIZE + * + * hssize_t + *
    + * #H5T_NATIVE_HERR + * + * herr_t + *
    + * #H5T_NATIVE_HBOOL + * + * hbool_t + *
    + * #H5T_NATIVE_B8 + * + * 8-bit unsigned integer or 8-bit buffer in memory + *
    + * #H5T_NATIVE_B16 + * + * 16-bit unsigned integer or 16-bit buffer in memory + *
    + * #H5T_NATIVE_B32 + * + * 32-bit unsigned integer or 32-bit buffer in memory + *
    + * #H5T_NATIVE_B64 + * + * 64-bit unsigned integer or 64-bit buffer in memory + *
    + * + * \subsection subsec_datatype_usage How Datatypes are Used + * + * \subsubsection subsubsec_datatype_usage_object The Datatype Object and the HDF5 Datatype API + * The HDF5 library manages datatypes as objects. The HDF5 datatype API manipulates the + * datatype objects through C function calls. New datatypes can be created from scratch or + * copied from existing datatypes. When a datatype is no longer needed its resources should be released by + * calling \ref H5Tclose(). + * + * The datatype object is used in several roles in the HDF5 data model and library. Essentially, a + * datatype is used whenever the form at of data elements is needed. There are four major uses of + * datatypes in the HDF5 library: at dataset creation, during data transfers, when discovering the + * contents of a file, and for specifying user-defined datatypes. See the table below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 7. Datatype uses
    + * Use + * + * Description + *
    + * Dataset creation + * + * The datatype of the data elements must be declared when the dataset is created. + *
    + * Dataset transfer + * + * The datatype (format) of the data elements must be defined for both the source and destination. + *
    + * Discovery + * + * The datatype of a dataset can be interrogated to retrieve a complete description of the storage layout. + *
    + * Creating user-defined datatypes + * + * Users can define their own datatypes by creating datatype objects and setting their properties. + *
    + * + * \subsubsection subsubsec_datatype_usage_create Dataset Creation + * All the data elements of a dataset have the same datatype. When a dataset is created, the datatype + * for the data elements must be specified. The datatype of a dataset can never be changed. The + * example below shows the use of a datatype to create a dataset called “/dset”. In this example, the + * dataset will be stored as 32-bit signed integers in big-endian order. + * + * Using a datatype to create a dataset + * \code + * hid_t dt; + * + * dt = H5Tcopy(H5T_STD_I32BE); + * dataset_id = H5Dcreate(file_id, “/dset”, dt, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * + * \subsubsection subsubsec_datatype_usage_transfer Data Transfer (Read and Write) + * Probably the most common use of datatypes is to write or read data from a dataset or attribute. In + * these operations, each data element is transferred from the source to the destination (possibly + * rearranging the order of the elements). Since the source and destination do not need to be + * identical (in other words, one is disk and the other is memory), the transfer requires + * both the format of the source element and the destination element. Therefore, data transfers use two + * datatype objects, for the source and destination. + * + * When data is written, the source is memory and the destination is disk (file). The memory + * datatype describes the format of the data element in the machine memory, and the file datatype + * describes the desired format of the data element on disk. Similarly, when reading, the source + * datatype describes the format of the data element on disk, and the destination datatype describes + * the format in memory. + * + * In the most common cases, the file datatype is the datatype specified when + * the dataset was + * created, and the memory datatype should be the appropriate \Emph{NATIVE} type. 
+ * The examples below show samples of writing data to and reading data from a dataset. The data + * in memory is declared C type ‘int’, and the datatype #H5T_NATIVE_INT corresponds to this + * type. The datatype of the dataset should be of datatype class #H5T_INTEGER. + * + * Writing to a dataset + * \code + * int dset_data[DATA_SIZE]; + * + * status = H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset_data); + * \endcode + * + * Reading from a dataset + * \code + * int dset_data[DATA_SIZE]; + * + * status = H5Dread(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset_data); + * \endcode + * + * \subsubsection subsubsec_datatype_usage_discover Discovery of Data Format + * The HDF5 Library enables a program to + * determine the datatype class and properties for any + * datatype. In order to discover the storage format of data in a dataset, the datatype is obtained, and + * the properties are determined by queries to the datatype object. The example below shows code + * that analyzes the datatype for an integer and prints out a description of its storage properties + * (byte order, signed, size). + * + * Discovering datatype properties + * \code + * switch (H5Tget_class(type)) { + * case H5T_INTEGER: + * ord = H5Tget_order(type); + * sgn = H5Tget_sign(type); + * printf(“Integer ByteOrder= ”); + * switch (ord) { + * case H5T_ORDER_LE: + * printf(“LE”); + * break; + * case H5T_ORDER_BE: + * printf(“BE”); + * break; + * } + * printf(“ Sign= ”); + * switch (sgn) { + * case H5T_SGN_NONE: + * printf(“false”); + * break; + * case H5T_SGN_2: + * printf(“true”); + * break; + * } + * printf(“ Size= ”); + * sz = H5Tget_size(type); + * printf(“%d”, sz); + * printf(“\n”); + * break; + * case H5T_???? + * ... 
+ * break; + * } + * \endcode + * + * \subsubsection subsubsec_datatype_usage_user Creating and Using User‐defined Datatypes + * Most programs will primarily use the predefined datatypes described above, possibly in + * composite data types such as compound or array datatypes. However, the HDF5 datatype model + * is extremely general; a user program can define a great variety of atomic datatypes (storage + * layouts). In particular, the datatype properties can define signed and unsigned integers of any + * size and byte order, and floating point numbers with different formats, size, and byte order. The + * HDF5 datatype API provides methods to set these properties. + * + * User-defined types can be used to define the layout of data in memory; examples might match + * some platform specific number format or application defined bit-field. The user-defined type can + * also describe data in the file such as an application-defined format. The user-defined types can be + * translated to and from standard types of the same class, as described above. + * + * \subsection subsec_datatype_function Datatype Function Summaries + * @see H5T reference manual provides a reference list of datatype functions, the H5T APIs. + * + * \subsection subsec_datatype_program Programming Model for Datatypes + * The HDF5 Library implements an object-oriented model of datatypes. HDF5 datatypes are + * organized as a logical set of base types, or datatype classes. The HDF5 Library manages + * datatypes as objects. The HDF5 datatype API manipulates the datatype objects through C + * function calls. The figure below shows the abstract view of the datatype object. The table below + * shows the methods (C functions) that operate on datatype objects. New datatypes can be created + * from scratch or copied from existing datatypes. + * + * + * + * + * + *
    + * \image html Dtypes_fig5.gif "The datatype object" + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 8. General operations on datatype objects
    + * API Function + * + * Description + *
+ * \ref hid_t \ref H5Tcreate (\ref H5T_class_t class, size_t size) + * + * Create a new datatype object of datatype class. The following datatype classes are supported + * with this function: + * \li #H5T_COMPOUND + * \li #H5T_OPAQUE + * \li #H5T_ENUM + * \li Other datatypes are created with \ref H5Tcopy(). + *
    + * \ref hid_t \ref H5Tcopy (\ref hid_t type) + * + * Obtain a modifiable transient datatype which is a copy of type. If type is a dataset identifier + * then the type returned is a modifiable transient copy of the datatype of the specified dataset. + *
    + * \ref hid_t \ref H5Topen (\ref hid_t location, const char *name, #H5P_DEFAULT) + * + * Open a committed datatype. The committed datatype returned by this function is read-only. + *
    + * \ref htri_t \ref H5Tequal (\ref hid_t type1, \ref hid_t type2) + * + * Determines if two types are equal. + *
    + * \ref herr_t \ref H5Tclose (\ref hid_t type) + * + * Releases resources associated with a datatype obtained from \ref H5Tcopy, \ref H5Topen, or + * \ref H5Tcreate. It is illegal to close an immutable transient datatype (for example, predefined types). + *
    + * \ref herr_t \ref H5Tcommit (\ref hid_t location, const char *name, hid_t type, + * #H5P_DEFAULT, #H5P_DEFAULT, #H5P_DEFAULT) + * + * Commit a transient datatype (not immutable) to a file to become a committed datatype. Committed + * datatypes can be shared. + *
    + * \ref htri_t \ref H5Tcommitted (\ref hid_t type) + * + * Test whether the datatype is transient or committed (named). + *
    + * \ref herr_t \ref H5Tlock (\ref hid_t type) + * + * Make a transient datatype immutable (read-only and not closable). Predefined types are locked. + *
+ * + * In order to use a datatype, the object must be created (\ref H5Tcreate), or a reference obtained by + * cloning from an existing type (\ref H5Tcopy), or opened (\ref H5Topen). In addition, a reference to the + * datatype of a dataset or attribute can be obtained with \ref H5Dget_type or \ref H5Aget_type. For + * composite datatypes a reference to the datatype for members or base types can be obtained + * (\ref H5Tget_member_type, \ref H5Tget_super). When the datatype object is no longer needed, the + * reference is discarded with \ref H5Tclose. + * + * Two datatype objects can be tested to see if they are the same with \ref H5Tequal. This function + * returns true if the two datatype references refer to the same datatype object. However, if two + * datatype objects define equivalent datatypes (the same datatype class and datatype properties), + * they will not be considered ‘equal’. + * + * A datatype can be written to the file as a first class object (\ref H5Tcommit). This is a committed + * datatype and can be used in the same way as any other datatype. + * + * \subsubsection subsubsec_datatype_program_discover Discovery of Datatype Properties + * Any HDF5 datatype object can be queried to discover all of its datatype properties. For each + * datatype class, there are a set of API functions to retrieve the datatype properties for this class. + * + *

    Properties of Atomic Datatypes

    + * Table 9 lists the functions to discover the properties of atomic datatypes. Table 10 lists the + * queries relevant to specific numeric types. Table 11 gives the properties for atomic string + * datatype, and Table 12 gives the property of the opaque datatype. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 9. Functions to discover properties of atomic datatypes
    + * API Function + * + * Description + *
    + * \ref H5T_class_t \ref H5Tget_class (\ref hid_t type) + * + * The datatype class: #H5T_INTEGER, #H5T_FLOAT, #H5T_STRING, #H5T_BITFIELD, #H5T_OPAQUE, #H5T_COMPOUND, + * #H5T_REFERENCE, #H5T_ENUM, #H5T_VLEN, #H5T_ARRAY + *
    + * size_t \ref H5Tget_size (\ref hid_t type) + * + * The total size of the element in bytes, including padding which may appear on either side of the + * actual value. + *
+ * \ref H5T_order_t \ref H5Tget_order (\ref hid_t type) + * + * The byte order describes how the bytes of the datatype are laid out in memory. If the lowest memory + * address contains the least significant byte of the datum then it is said to be little-endian or + * #H5T_ORDER_LE. If the bytes are in the opposite order then they are said to be big-endian or #H5T_ORDER_BE. + *
    + * size_t \ref H5Tget_precision (\ref hid_t type) + * + * The precision property identifies the number of significant bits of a datatype and the offset property + * (defined below) identifies its location. Some datatypes occupy more bytes than what is needed to store + * the value. For instance, a short on a Cray is 32 significant bits in an eight-byte field. + *
    + * int \ref H5Tget_offset (\ref hid_t type) + * + * The offset property defines the bit location of the least significant bit of a bit field whose length + * is precision. + *
    + * \ref herr_t \ref H5Tget_pad (\ref hid_t type, \ref H5T_pad_t *lsb, \ref H5T_pad_t *msb) + * + * Padding is the bits of a data element which are not significant as defined by the precision and offset + * properties. Padding in the low-numbered bits is lsb padding and padding in the high-numbered bits is msb + * padding. Padding bits can be set to zero (#H5T_PAD_ZERO) or one (#H5T_PAD_ONE). + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Table 10. Functions to discover properties of numeric datatypes
    + * API Function + * + * Description + *
    + * \ref H5T_sign_t \ref H5Tget_sign (\ref hid_t type) + * + * (INTEGER)Integer data can be signed two’s complement (#H5T_SGN_2) or unsigned (#H5T_SGN_NONE). + *
    + * \ref herr_t \ref H5Tget_fields (\ref hid_t type, size_t *spos, size_t *epos, size_t *esize, + * size_t*mpos, size_t *msize) + * + * (FLOAT)A floating-point data element has bit fields which are the exponent and mantissa as well as a + * mantissa sign bit. These properties define the location (bit position of least significant bit of the + * field) and size (in bits) of each field. The sign bit is always of length one and none of the fields + * are allowed to overlap. + *
+ * size_t \ref H5Tget_ebias (\ref hid_t type) + * + * (FLOAT)The exponent is stored as a non-negative value which is + * ebias larger than the true exponent. + *
    + * \ref H5T_norm_t \ref H5Tget_norm (\ref hid_t type) + * + * (FLOAT)This property describes the normalization method of the mantissa. + *
    • #H5T_NORM_MSBSET: the mantissa is shifted left (if non-zero) until the first bit + * after the radix point is set and the exponent is adjusted accordingly. All bits of the + * mantissa after the radix point are stored.
    • + *
• #H5T_NORM_IMPLIED: the mantissa is shifted left (if non-zero) until the first + * bit after the radix point is set and the exponent is adjusted accordingly. The first + * bit after the radix point is not stored since it’s always set.
    • + *
    • #H5T_NORM_NONE: the fractional part of the mantissa is stored without normalizing it.
    + *
    + * \ref H5T_pad_t \ref H5Tget_inpad (\ref hid_t type) + * + * (FLOAT)If any internal bits (that is, bits between the sign bit, the mantissa field, + * and the exponent field but within the precision field) are unused, then they will be + * filled according to the value of this property. The padding can be: + * #H5T_PAD_BACKGROUND, #H5T_PAD_ZERO,or #H5T_PAD_ONE. + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 11. Functions to discover properties of atomic string datatypes
    + * API Function + * + * Description + *
    + * \ref H5T_cset_t \ref H5Tget_cset (\ref hid_t type) + * + * Two character sets are currently supported: + * ASCII (#H5T_CSET_ASCII) and UTF-8 (#H5T_CSET_UTF8). + *
    + * \ref H5T_str_t \ref H5Tget_strpad (\ref hid_t type) + * + * The string datatype has a fixed length, but the string may be shorter than the length. + * This property defines the storage mechanism for the left over bytes. The options are: + * \li #H5T_STR_NULLTERM + * \li #H5T_STR_NULLPAD + * \li #H5T_STR_SPACEPAD. + *
    + * + * + * + * + * + * + * + * + * + * + * + *
    Table 12. Functions to discover properties of atomic opaque datatypes
    + * API Function + * + * Description + *
    + * char* \ref H5Tget_tag(\ref hid_t type_id) + * + * A user-defined string. + *
    + * + *

    Properties of Composite Datatypes

    + * The composite datatype classes can also be analyzed to discover their datatype properties and the + * datatypes that are members or base types of the composite datatype. The member or base type + * can, in turn, be analyzed. The table below lists the functions that can access the datatype + * properties of the different composite datatypes. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 13. Functions to discover properties of composite datatypes
    + * API Function + * + * Description + *
    + * int \ref H5Tget_nmembers(\ref hid_t type_id) + * + * (COMPOUND)The number of fields in the compound datatype. + *
    + * \ref H5T_class_t \ref H5Tget_member_class (\ref hid_t cdtype_id, unsigned member_no) + * + * (COMPOUND)The datatype class of compound datatype member member_no. + *
    + * char* \ref H5Tget_member_name (\ref hid_t type_id, unsigned field_idx) + * + * (COMPOUND)The name of field field_idx of a compound datatype. + *
    + * size_t \ref H5Tget_member_offset (\ref hid_t type_id, unsigned memb_no) + * + * (COMPOUND)The byte offset of the beginning of a field within a compound datatype. + *
    + * \ref hid_t \ref H5Tget_member_type (\ref hid_t type_id, unsigned field_idx) + * + * (COMPOUND)The datatype of the specified member. + *
    + * int \ref H5Tget_array_ndims (\ref hid_t adtype_id) + * + * (ARRAY)The number of dimensions (rank) of the array datatype object. + *
    + * int \ref H5Tget_array_dims (\ref hid_t adtype_id, hsize_t *dims[]) + * + * (ARRAY)The sizes of the dimensions and the dimension permutations of the array datatype object. + *
    + * \ref hid_t \ref H5Tget_super(\ref hid_t type) + * + * (ARRAY, VL, ENUM)The base datatype from which the datatype type is derived. + *
    + * \ref herr_t \ref H5Tenum_nameof(\ref hid_t type, const void *value, char *name, size_t size) + * + * (ENUM)The symbol name that corresponds to the specified value of the enumeration datatype. + *
    + * \ref herr_t \ref H5Tenum_valueof(\ref hid_t type, const char *name, void *value) + * + * (ENUM)The value that corresponds to the specified name of the enumeration datatype. + *
+ * \ref herr_t \ref H5Tget_member_value (\ref hid_t type, unsigned memb_no, void *value) + * + * (ENUM)The value of the enumeration datatype member memb_no. + *
    + * + * \subsubsection subsubsec_datatype_program_define Definition of Datatypes + * The HDF5 library enables user programs to create and modify datatypes. The essential steps are: + *
    • 1. Create a new datatype object of a specific composite datatype class, or copy an existing + * atomic datatype object
    • + *
    • 2. Set properties of the datatype object
    • + *
    • 3. Use the datatype object
    • + *
    • 4. Close the datatype object
    + * + * To create a user-defined atomic datatype, the procedure is to clone a predefined datatype of the + * appropriate datatype class (\ref H5Tcopy), and then set the datatype properties appropriate to the + * datatype class. The table below shows how to create a datatype to describe a 1024-bit unsigned + * integer. + * + * Create a new datatype + * \code + * hid_t new_type = H5Tcopy (H5T_NATIVE_INT); + * + * H5Tset_precision(new_type, 1024); + * H5Tset_sign(new_type, H5T_SGN_NONE); + * \endcode + * + * Composite datatypes are created with a specific API call for each datatype class. The table below + * shows the creation method for each datatype class. A newly created datatype cannot be used until the + * datatype properties are set. For example, a newly created compound datatype has no members and cannot + * be used. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 14. Functions to create each datatype class
    + * Datatype Class + * + * Function to Create + *
    + * COMPOUND + * + * #H5Tcreate + *
    + * OPAQUE + * + * #H5Tcreate + *
    + * ENUM + * + * #H5Tenum_create + *
    + * ARRAY + * + * #H5Tarray_create + *
    + * VL + * + * #H5Tvlen_create + *
    + * + * Once the datatype is created and the datatype properties set, the datatype object can be used. + * + * Predefined datatypes are defined by the library during initialization using the same mechanisms + * as described here. Each predefined datatype is locked (\ref H5Tlock), so that it cannot be changed or + * destroyed. User-defined datatypes may also be locked using \ref H5Tlock. + * + *

    User-defined Atomic Datatypes

    + * Table 15 summarizes the API methods that set properties of atomic types. Table 16 shows + * properties specific to numeric types, Table 17 shows properties specific to the string datatype + * class. Note that offset, pad, etc. do not apply to strings. Table 18 shows the specific property of + * the OPAQUE datatype class. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 15. API methods that set properties of atomic datatypes
    + * Functions + * + * Description + *
    + * \ref herr_t \ref H5Tset_size (\ref hid_t type, size_t size) + * + * Set the total size of the element in bytes. This includes padding which may appear on either + * side of the actual value. If this property is reset to a smaller value which would cause the + * significant part of the data to extend beyond the edge of the datatype, then the offset property + * is decremented a bit at a time. If the offset reaches zero and the significant part of the data + * still extends beyond the edge of the datatype then the precision property is decremented a bit at + * a time. Decreasing the size of a datatype may fail if the #H5T_FLOAT bit fields would extend beyond + * the significant part of the type. + *
    + * \ref herr_t \ref H5Tset_order (\ref hid_t type, \ref H5T_order_t order) + * + * Set the byte order to little-endian (#H5T_ORDER_LE) or big-endian (#H5T_ORDER_BE). + *
+ * \ref herr_t \ref H5Tset_precision (\ref hid_t type, size_t precision) + * + * Set the number of significant bits of a datatype. The offset property (defined below) identifies + * its location. The size property defined above represents the entire size (in bytes) of the datatype. + * If the precision is decreased then padding bits are inserted on the MSB side of the significant + * bits (this will fail for #H5T_FLOAT types if it results in the sign, mantissa, or exponent bit field + * extending beyond the edge of the significant bit field). On the other hand, if the precision is + * increased so that it “hangs over” the edge of the total size then the offset property is decremented + * a bit at a time. If the offset reaches zero and the significant bits still hang over the edge, then + * the total size is increased a byte at a time. + *
    + * \ref herr_t \ref H5Tset_offset (\ref hid_t type, size_t offset) + * + * Set the bit location of the least significant bit of a bit field whose length is precision. The + * bits of the entire data are numbered beginning at zero at the least significant bit of the least + * significant byte (the byte at the lowest memory address for a little-endian type or the byte at + * the highest address for a big-endian type). The offset property defines the bit location of the + * least significant bit of a bit field whose length is precision. If the offset is increased so the + * significant bits “hang over” the edge of the datum, then the size property is automatically incremented. + *
    + * \ref herr_t \ref H5Tset_pad (\ref hid_t type, \ref H5T_pad_t lsb, \ref H5T_pad_t msb) + * + * Set the padding to zeros (#H5T_PAD_ZERO) or ones (#H5T_PAD_ONE). Padding is the bits of a + * data element which are not significant as defined by the precision and offset properties. Padding + * in the low-numbered bits is lsb padding and padding in the high-numbered bits is msb padding. + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 16. API methods that set properties of numeric datatypes
    + * Functions + * + * Description + *
    + * \ref herr_t \ref H5Tset_sign (\ref hid_t type, \ref H5T_sign_t sign) + * + * (INTEGER) Integer data can be signed two’s complement (#H5T_SGN_2) or unsigned (#H5T_SGN_NONE). + *
    + * \ref herr_t \ref H5Tset_fields (\ref hid_t type, size_t spos, size_t epos, size_t esize, + * size_t mpos, size_t msize) + * + * (FLOAT) Set the properties that define the location (bit position of least significant bit of the field) + * and size (in bits) of each field. The sign bit is always of length one and none of the fields are + * allowed to overlap. + *
    + * \ref herr_t \ref H5Tset_ebias (\ref hid_t type, size_t ebias) + * + * (FLOAT) The exponent is stored as a non-negative value which is ebias larger than the true exponent. + *
    + * \ref herr_t \ref H5Tset_norm (\ref hid_t type, \ref H5T_norm_t norm) + * + * (FLOAT) This property describes the normalization method of the mantissa. + *
    • #H5T_NORM_MSBSET: the mantissa is shifted left (if non-zero) until the first bit + * after the radix point is set and the exponent is adjusted accordingly. All bits of the + * mantissa after the radix point are stored.
    • + *
    • #H5T_NORM_IMPLIED: the mantissa is shifted left (if non-zero) until the first bit + * after the radix point is set and the exponent is adjusted accordingly. The first bit after + * the radix point is not stored since it is always set.
    • + *
    • #H5T_NORM_NONE: the fractional part of the mantissa is stored without normalizing it.
    + *
    + * \ref herr_t \ref H5Tset_inpad (\ref hid_t type, \ref H5T_pad_t inpad) + * + * (FLOAT) If any internal bits (that is, bits between the sign bit, the mantissa field, and the exponent field but within the precision field) are unused, then they will be filled according to the value of this property. The padding can be: + * \li #H5T_PAD_BACKGROUND + * \li #H5T_PAD_ZERO + * \li #H5T_PAD_ONE + *
    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 17. API methods that set properties of string datatypes
    + * Functions + * + * Description + *
    + * \ref herr_t \ref H5Tset_size (\ref hid_t type, size_t size) + * + * Set the length of the string, in bytes. The precision is automatically set to 8*size. + *
    + * \ref herr_t \ref H5Tset_precision (\ref hid_t type, size_t precision) + * + * The precision must be a multiple of 8. + *
    + * \ref herr_t \ref H5Tset_cset (\ref hid_t type_id, \ref H5T_cset_t cset) + * + * Two character sets are currently supported: + * \li ASCII (#H5T_CSET_ASCII) + * \li UTF-8 (#H5T_CSET_UTF8). + *
    + * \ref herr_t \ref H5Tset_strpad (\ref hid_t type_id, H5T_str_t strpad) + * + * The string datatype has a fixed length, but the string may be shorter than the length. This + * property defines the storage mechanism for the left over bytes. The method used to store + * character strings differs with the programming language: + * \li C usually null terminates strings + * \li Fortran left-justifies and space-pads strings + * + * Valid string padding values, as passed in the parameter strpad, are as follows: + * \li #H5T_STR_NULLTERM: Null terminate (as C does) + * \li #H5T_STR_NULLPAD: Pad with zeros + * \li #H5T_STR_SPACEPAD: Pad with spaces (as FORTRAN does) + *
    + * + * + * + * + * + * + * + * + * + * + * + *
    Table 18. API methods that set properties of opaque datatypes
    + * Functions + * + * Description + *
    + * \ref herr_t \ref H5Tset_tag (\ref hid_t type_id, const char *tag) + * + * Tags the opaque datatype type_id with an ASCII identifier tag. + *
    + * + *

    Examples

    + * The example below shows how to create a 128-bit little-endian signed integer type. Increasing + * the precision of a type automatically increases the total size. Note that the proper + * procedure is to begin from a type of the intended datatype class which in this case is a + * NATIVE INT. + * + * Create a new 128-bit little-endian signed integer datatype + * \code + * hid_t new_type = H5Tcopy (H5T_NATIVE_INT); + * H5Tset_precision (new_type, 128); + * H5Tset_order (new_type, H5T_ORDER_LE); + * \endcode + * + * The figure below shows the storage layout as the type is defined. The \ref H5Tcopy creates a + * datatype that is the same as #H5T_NATIVE_INT. In this example, suppose this is a 32-bit + * big-endian number (Figure a). The precision is set to 128 bits, which automatically extends + * the size to 8 bytes (Figure b). Finally, the byte order is set to little-endian (Figure c). + * + * + * + * + * + *
    + * \image html Dtypes_fig6.gif "The storage layout for a new 128-bit little-endian signed integer datatype" + *
    + * + * The significant bits of a data element can be offset from the beginning of the memory for that + * element by an amount of padding. The offset property specifies the number of bits of padding + * that appear to the “right of” the value. The table and figure below show how a 32-bit unsigned + * integer with 16-bits of precision having the value 0x1122 will be laid out in memory. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Table 19. Memory Layout for a 32-bit unsigned integer
    + * Byte Position + * + * Big-Endian
    Offset=0 + *
    + * Big-Endian
    Offset=16 + *
    + * Little-Endian
    Offset=0 + *
    + * Little-Endian
    Offset=16 + *
    + * 0: + * + * [pad] + * + * [0x11] + * + * [0x22] + * + * [pad] + *
    + * 1: + * + * [pad] + * + * [0x22] + * + * [0x11] + * + * [pad] + *
    + * 2: + * + * [0x11] + * + * [pad] + * + * [pad] + * + * [0x22] + *
    + * 3: + * + * [0x22] + * + * [pad] + * + * [pad] + * + * [0x11] + *
    + * + * + * + * + * + *
    + * \image html Dtypes_fig7.gif "Memory Layout for a 32-bit unsigned integer" + *
    + * + * If the offset is incremented then the total size is incremented also if necessary to prevent + * significant bits of the value from hanging over the edge of the datatype. + * + * The bits of the entire data are numbered beginning at zero at the least significant bit of the least + * significant byte (the byte at the lowest memory address for a little-endian type or the byte at the + * highest address for a big-endian type). The offset property defines the bit location of the least + * significant bit of a bit field whose length is precision. If the offset is increased so the significant + * bits “hang over” the edge of the datum, then the size property is automatically incremented. + * + * To illustrate the properties of the integer datatype class, the example below shows how to create + * a user-defined datatype that describes a 24-bit signed integer that starts on the third bit of a 32-bit + * word. The datatype is specialized from a 32-bit integer, the precision is set to 24 bits, and the + * offset is set to 3. + * + * A user-defined datatype with a 24-bit signed integer + * \code + * hid_t dt; + * + * dt = H5Tcopy(H5T_STD_I32LE); + * H5Tset_precision(dt, 24); + * H5Tset_offset(dt,3); + * H5Tset_pad(dt, H5T_PAD_ZERO, H5T_PAD_ONE); + * \endcode + * + * The figure below shows the storage layout for a data element. Note that the unused bits in the + * offset will be set to zero and the unused bits at the end will be set to one, as specified in the + * \ref H5Tset_pad call. + * + * + * + * + *
    + * \image html Dtypes_fig8.gif "A user-defined integer datatype with a range of -1,048,583 to 1,048,584" + *
    + * + * To illustrate a user-defined floating point number, the example below shows how to create a 24-bit + * floating point number that starts 5 bits into a 4 byte word. The floating point number is defined to + * have a mantissa of 19 bits (bits 5-23), an exponent of 3 bits (25-27), and the sign bit is bit 28. + * (Note that this is an illustration of what can be done and is not necessarily a floating point + * format that a user would require.) + * + * A user-defined datatype with a 24-bit floating point datatype + * \code + * hid_t dt; + * + * dt = H5Tcopy(H5T_IEEE_F32LE); + * H5Tset_precision(dt, 24); + * H5Tset_fields (dt, 28, 25, 3, 5, 19); + * H5Tset_pad(dt, H5T_PAD_ZERO, H5T_PAD_ONE); + * H5Tset_inpad(dt, H5T_PAD_ZERO); + * \endcode + * + * + * + * + * + *
    + * \image html Dtypes_fig9.gif "A user-defined floating point datatype" + *
    + * The figure above shows the storage layout of a data element for this datatype. Note that there is + * an unused bit (24) between the mantissa and the exponent. This bit is filled with the inpad value + * which in this case is 0. + * + * The sign bit is always of length one and none of the fields are allowed to overlap. When + * expanding a floating-point type one should set the precision first; when decreasing the size one + * should set the field positions and sizes first. + * + *

    Composite Datatypes

    + * All composite datatypes must be user-defined; there are no predefined composite datatypes. + * + *

    Compound Datatypes

    + * The subsections below describe how to create a compound datatype and how to write and read + * data of a compound datatype. + * + *

    Defining Compound Datatypes

    + * + * Compound datatypes are conceptually similar to a C struct or Fortran derived types. The + * compound datatype defines a contiguous sequence of bytes, which are formatted using one up to + * 2^16 datatypes (members). A compound datatype may have any number of members, in any + * order, and the members may have any datatype, including compound. Thus, complex nested + * compound datatypes can be created. The total size of the compound datatype is greater than or + * equal to the sum of the size of its members, up to a maximum of 2^32 bytes. HDF5 does not + * support datatypes with distinguished records or the equivalent of C unions or Fortran + * EQUIVALENCE statements. + * + * Usually a C struct or Fortran derived type will be defined to hold a data point in memory, and the + * offsets of the members in memory will be the offsets of the struct members from the beginning + * of an instance of the struct. The HDF5 C library provides a macro #HOFFSET (s,m)to calculate + * the member’s offset. The HDF5 Fortran applications have to calculate offsets by using sizes of + * members datatypes and by taking in consideration the order of members in the Fortran derived type. + * \code + * HOFFSET(s,m) + * \endcode + * This macro computes the offset of member m within a struct s + * \code + * offsetof(s,m) + * \endcode + * This macro defined in stddef.h does exactly the same thing as the HOFFSET()macro. + * + * Note for Fortran users: Offsets of Fortran structure members correspond to the offsets within a + * packed datatype (see explanation below) stored in an HDF5 file. + * + * Each member of a compound datatype must have a descriptive name which is the key used to + * uniquely identify the member within the compound datatype. A member name in an HDF5 + * datatype does not necessarily have to be the same as the name of the member in the C struct or + * Fortran derived type, although this is often the case. 
Nor does one need to define all members of + * the C struct or Fortran derived type in the HDF5 compound datatype (or vice versa). + * + * Unlike atomic datatypes which are derived from other atomic datatypes, compound datatypes are + * created from scratch. First, one creates an empty compound datatype and specifies its total size. + * Then members are added to the compound datatype in any order. Each member type is inserted + * at a designated offset. Each member has a name which is the key used to uniquely identify the + * member within the compound datatype. + * + * The example below shows a way of creating an HDF5 C compound datatype to describe a + * complex number. This is a structure with two components, “real” and “imaginary”, and each + * component is a double. An equivalent C struct whose type is defined by the complex_tstruct is + * shown. + * + * A compound datatype for complex numbers in C + * \code + * typedef struct { + * double re; //real part + * double im; //imaginary part + * } complex_t; + * + * hid_t complex_id = H5Tcreate (H5T_COMPOUND, sizeof (complex_t)); + * H5Tinsert (complex_id, “real”, HOFFSET(complex_t,re), + * H5T_NATIVE_DOUBLE); + * H5Tinsert (complex_id, “imaginary”, HOFFSET(complex_t,im), + * H5T_NATIVE_DOUBLE); + * \endcode + * + * The example below shows a way of creating an HDF5 Fortran compound datatype to describe a + * complex number. This is a Fortran derived type with two components, “real” and “imaginary”, + * and each component is DOUBLE PRECISION. An equivalent Fortran TYPE whose type is defined + * by the TYPE complex_t is shown. + * + * A compound datatype for complex numbers in Fortran + * \code + * TYPE complex_t + * DOUBLE PRECISION re ! real part + * DOUBLE PRECISION im; ! 
imaginary part + * END TYPE complex_t + * + * CALL h5tget_size_f(H5T_NATIVE_DOUBLE, re_size, error) + * CALL h5tget_size_f(H5T_NATIVE_DOUBLE, im_size, error) + * complex_t_size = re_size + im_size + * CALL h5tcreate_f(H5T_COMPOUND_F, complex_t_size, type_id) + * offset = 0 + * CALL h5tinsert_f(type_id, “real”, offset, H5T_NATIVE_DOUBLE, error) + * offset = offset + re_size + * CALL h5tinsert_f(type_id, “imaginary”, offset, H5T_NATIVE_DOUBLE, error) + * \endcode + * + * Important Note: The compound datatype is created with a size sufficient to hold all its members. + * In the C example above, the size of the C struct and the #HOFFSET macro are used as a + * convenient mechanism to determine the appropriate size and offset. Alternatively, the size and + * offset could be manually determined: the size can be set to 16 with “real” at offset 0 and + * “imaginary” at offset 8. However, different platforms and compilers have different sizes for + * “double” and may have alignment restrictions which require additional padding within the + * structure. It is much more portable to use the #HOFFSET macro which assures that the values will + * be correct for any platform. + * + * The figure below shows how the compound datatype would be laid out assuming that + * NATIVE_DOUBLE are 64-bit numbers and that there are no alignment requirements. The total + * size of the compound datatype will be 16 bytes, the “real” component will start at byte 0, and + * “imaginary” will start at byte 8. + * + * + * + * + * + *
    + * \image html Dtypes_fig10.gif "Layout of a compound datatype" + *
    + * + * The members of a compound datatype may be any HDF5 datatype including the compound, + * array, and variable-length (VL) types. The figure and example below show the memory layout + * and code which creates a compound datatype composed of two complex values, and each + * complex value is also a compound datatype as in the figure above. + * + * + * + * + * + *
    + * \image html Dtypes_fig11.gif "Layout of a compound datatype nested in a compound datatype" + *
    + * + * Code for a compound datatype nested in a compound datatype + * \code + * typedef struct { + * complex_t x; + * complex_t y; + * } surf_t; + * + * hid_t complex_id, surf_id; // hdf5 datatypes + * + * complex_id = H5Tcreate (H5T_COMPOUND, sizeof(complex_t)); + * H5Tinsert (complex_id, “re”, HOFFSET(complex_t, re), H5T_NATIVE_DOUBLE); + * H5Tinsert (complex_id, “im”, HOFFSET(complex_t, im), H5T_NATIVE_DOUBLE); + * + * surf_id = H5Tcreate (H5T_COMPOUND, sizeof(surf_t)); + * H5Tinsert (surf_id, “x”, HOFFSET(surf_t, x), complex_id); + * H5Tinsert (surf_id, “y”, HOFFSET(surf_t, y), complex_id); + * \endcode + * + * Note that a similar result could be accomplished by creating a compound datatype and inserting + * four fields. See the figure below. This results in the same layout as the figure above. The difference + * would be how the fields are addressed. In the first case, the real part of ‘y’ is called ‘y.re’; + * in the second case it is ‘y-re’. + * + * Another compound datatype nested in a compound datatype + * \code + * typedef struct { + * complex_t x; + * complex_t y; + * } surf_t; + * + * hid_t surf_id = H5Tcreate (H5T_COMPOUND, sizeof(surf_t)); + * H5Tinsert (surf_id, “x-re”, HOFFSET(surf_t, x.re), H5T_NATIVE_DOUBLE); + * H5Tinsert (surf_id, “x-im”, HOFFSET(surf_t, x.im), H5T_NATIVE_DOUBLE); + * H5Tinsert (surf_id, “y-re”, HOFFSET(surf_t, y.re), H5T_NATIVE_DOUBLE); + * H5Tinsert (surf_id, “y-im”, HOFFSET(surf_t, y.im), H5T_NATIVE_DOUBLE); + * \endcode + * + * The members of a compound datatype do not always fill all the bytes. The #HOFFSET macro + * assures that the members will be laid out according to the requirements of the platform and + * language. The example below shows an example of a C struct which requires extra bytes of + * padding on many platforms. The second element, ‘b’, is a 1-byte character followed by an 8 byte + * double, ‘c’. On many systems, the 8-byte value must be stored on a 4-or 8-byte boundary. 
This + * requires the struct to be larger than the sum of the size of its elements. + * + * In the example below, sizeof and #HOFFSET are used to assure that the members are inserted at + * the correct offset to match the memory conventions of the platform. The figure below shows how + * this data element would be stored in memory, assuming the double must start on a 4-byte + * boundary. Notice the extra bytes between ‘b’ and ‘c’. + * + * A compound datatype that requires padding + * \code + * typedef struct { + * int a; + * char b; + * double c; + * } s1_t; + * + * hid_t s1_tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t)); + * H5Tinsert (s1_tid, “x-im”, HOFFSET(s1_t, a), H5T_NATIVE_INT); + * H5Tinsert (s1_tid, “y-re”, HOFFSET(s1_t, b), H5T_NATIVE_CHAR); + * H5Tinsert (s1_tid, “y-im”, HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); + * \endcode + * + * + * + * + * + *
    + * \image html Dtypes_fig12.gif "Memory layout of a compound datatype that requires padding" + *
    + * + * However, data stored on disk does not require alignment, so unaligned versions of compound + * data structures can be created to improve space efficiency on disk. These unaligned compound + * datatypes can be created by computing offsets by hand to eliminate inter-member padding, or the + * members can be packed by calling #H5Tpack (which modifies a datatype directly, so it is usually + * preceded by a call to #H5Tcopy). + * + * The example below shows how to create a disk version of the compound datatype from the + * figure above in order to store data on disk in as compact a form as possible. Packed compound + * datatypes should generally not be used to describe memory as they may violate alignment + * constraints for the architecture being used. Note also that using a packed datatype for disk + * storage may involve a higher data conversion cost. + * + * Create a packed compound datatype in C + * \code + * hid_t s2_tid = H5Tcopy (s1_tid); + * H5Tpack (s2_tid); + * \endcode + * + * The example below shows the sequence of Fortran calls to create a packed compound datatype. + * An HDF5 Fortran compound datatype never describes a compound datatype in memory and + * compound data is ALWAYS written by fields as described in the next section. Therefore packing + * is not needed unless the offset of each consecutive member is not equal to the sum of the sizes of + * the previous members. + * + * Create a packed compound datatype in Fortran + * \code + * CALL h5tcopy_f(s1_id, s2_id, error) + * CALL h5tpack_f(s2_id, error) + * \endcode + * + *

    Creating and Writing Datasets with Compound Datatypes

    + * + * Creating datasets with compound datatypes is similar to creating datasets with any other HDF5 + * datatypes. But writing and reading may be different since datasets that have compound datatypes + * can be written or read by a field (member) or subsets of fields (members). The compound + * datatype is the only composite datatype that supports “sub-setting” by the elements the datatype + * is built from. + * + * The example below shows a C example of creating and writing a dataset with a compound + * datatype. + * + * + * Create and write a dataset with a compound datatype in C + * \code + * typedef struct s1_t { + * int a; + * float b; + * double c; + * } s1_t; + * + * s1_t data[LENGTH]; + * + * // Initialize data + * for (i = 0; i < LENGTH; i++) { + * data[i].a = i; + * data[i].b = i*i; + * data[i].c = 1./(i+1); + * } + * + * ... + * + * s1_tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t)); + * H5Tinsert(s1_tid, “a_name”, HOFFSET(s1_t, a), H5T_NATIVE_INT); + * H5Tinsert(s1_tid, “b_name”, HOFFSET(s1_t, b), H5T_NATIVE_FLOAT); + * H5Tinsert(s1_tid, “c_name”, HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); + * + * ... + * + * dataset_id = H5Dcreate(file_id, “SDScompound.h5”, s1_t, + * space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * H5Dwrite (dataset_id, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + * \endcode + * + * The example below shows the content of the file written on a little-endian machine. + * Create and write a little-endian dataset with a compound datatype in C + * \code + * HDF5 “SDScompound.h5” { + * GROUP “/” { + * DATASET “ArrayOfStructures” { + * DATATYPE H5T_COMPOUND { + * H5T_STD_I32LE “a_name”; + * H5T_IEEE_F32LE “b_name”; + * H5T_IEEE_F64LE “c_name”; + * } + * DATASPACE SIMPLE { ( 3 ) / ( 3 ) } + * DATA { + * (0): { + * 0, + * 0, + * 1 + * }, + * (1): { + * 0, + * 1, + * 0.5 + * }, + * (2): { + * 0, + * 4, + * 0.333333 + * } + * } + * } + * } + * } + * \endcode + * + * It is not necessary to write the whole data at once. 
Datasets with compound datatypes can be + * written by field or by subsets of fields. In order to do this one has to remember to set the transfer + * property of the dataset using the H5Pset_preserve call and to define the memory datatype that + * corresponds to a field. The example below shows how float and double fields are written to the + * dataset. + * + * Writing floats and doubles to a dataset + * \code + * typedef struct sb_t { + * float b; + * double c; + * } sb_t; + * + * typedef struct sc_t { + * float b; + * double c; + * } sc_t; + * sb_t data1[LENGTH]; + * sc_t data2[LENGTH]; + * + * // Initialize data + * for (i = 0; i < LENGTH; i++) { + * data1.b = i * i; + * data2.c = 1./(i + 1); + * } + * + * ... + * + * // Create dataset as in example 15 + * + * ... + * + * // Create memory datatypes corresponding to float + * // and double datatype fields + * + * sb_tid = H5Tcreate (H5T_COMPOUND, sizeof(sb_t)); + * H5Tinsert(sb_tid, “b_name”, HOFFSET(sb_t, b), H5T_NATIVE_FLOAT); + * sc_tid = H5Tcreate (H5T_COMPOUND, sizeof(sc_t)); + * H5Tinsert(sc_tid, “c_name”, HOFFSET(sc_t, c), H5T_NATIVE_DOUBLE); + * + * ... + * + * // Set transfer property + * xfer_id = H5Pcreate(H5P_DATASET_XFER); + * H5Pset_preserve(xfer_id, 1); + * H5Dwrite (dataset_id, sb_tid, H5S_ALL, H5S_ALL, xfer_id, data1); + * H5Dwrite (dataset_id, sc_tid, H5S_ALL, H5S_ALL, xfer_id, data2); + * \endcode + * + * The figure below shows the content of the file written on a little-endian machine. Only float and + * double fields are written. The default fill value is used to initialize the unwritten integer field. 
+ * Writing floats and doubles to a dataset on a little-endian system + * \code + * HDF5 “SDScompound.h5” { + * GROUP “/” { + * DATASET “ArrayOfStructures” { + * DATATYPE H5T_COMPOUND { + * H5T_STD_I32LE “a_name”; + * H5T_IEEE_F32LE “b_name”; + * H5T_IEEE_F64LE “c_name”; + * } + * DATASPACE SIMPLE { ( 3 ) / ( 3 ) } + * DATA { + * (0): { + * 0, + * 0, + * 1 + * }, + * (1): { + * 0, + * 1, + * 0.5 + * }, + * (2): { + * 0, + * 4, + * 0.333333 + * } + * } + * } + * } + * } + * \endcode + * + * The example below contains a Fortran example that creates and writes a dataset with a + * compound datatype. As this example illustrates, writing and reading compound datatypes in + * Fortran is always done by fields. The content of the written file is the same as shown in the + * example above. + * Create and write a dataset with a compound datatype in Fortran + * \code + * ! One cannot write an array of a derived datatype in + * ! Fortran. + * TYPE s1_t + * INTEGER a + * REAL b + * DOUBLE PRECISION c + * END TYPE s1_t + * TYPE(s1_t) d(LENGTH) + * ! Therefore, the following code initializes an array + * ! corresponding to each field in the derived datatype + * ! and writesthose arrays to the dataset + * + * INTEGER, DIMENSION(LENGTH) :: a + * REAL, DIMENSION(LENGTH) :: b + * DOUBLE PRECISION, DIMENSION(LENGTH) :: c + * + * ! Initialize data + * do i = 1, LENGTH + * a(i) = i-1 + * b(i) = (i-1) * (i-1) + * c(i) = 1./i + * enddo + * + * ... + * + * ! Set dataset transfer property to preserve partially + * ! initialized fields during write/read to/from dataset + * ! with compound datatype. + * ! + * CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error) + * CALL h5pset_preserve_f(plist_id, .TRUE., error) + * + * ... + * + * ! + * ! Create compound datatype. + * ! + * ! First calculate total size by calculating sizes of + * ! each member + * ! 
+ * CALL h5tget_size_f(H5T_NATIVE_INTEGER, type_sizei, error) + * CALL h5tget_size_f(H5T_NATIVE_REAL, type_sizer, error) + * CALL h5tget_size_f(H5T_NATIVE_DOUBLE, type_sized, error) + * type_size = type_sizei + type_sizer + type_sized + * CALL h5tcreate_f(H5T_COMPOUND_F, type_size, dtype_id, error) + * ! + * ! Insert members + * ! + * ! + * ! INTEGER member + * ! + * offset = 0 + * CALL h5tinsert_f(dtype_id, “a_name”, offset, H5T_NATIVE_INTEGER, error) + * ! + * ! REAL member + * ! + * offset = offset + type_sizei + * CALL h5tinsert_f(dtype_id, “b_name”, offset, H5T_NATIVE_REAL, error) + * ! + * ! DOUBLE PRECISION member + * ! + * offset = offset + type_sizer + * CALL h5tinsert_f(dtype_id, “c_name”, offset, H5T_NATIVE_DOUBLE, error) + * ! + * ! Create the dataset with compound datatype. + * ! + * CALL h5dcreate_f(file_id, dsetname, dtype_id, dspace_id, &dset_id, error, H5P_DEFAULT_F, + * H5P_DEFAULT_F, H5P_DEFAULT_F) + * ! + * + * ... + * + * ! Create memory types. We have to create a compound + * ! datatype for each member we want to write. + * ! + * CALL h5tcreate_f(H5T_COMPOUND_F, type_sizei, dt1_id, error) + * offset = 0 + * CALL h5tinsert_f(dt1_id, “a_name”, offset, H5T_NATIVE_INTEGER, error) + * ! + * CALL h5tcreate_f(H5T_COMPOUND_F, type_sizer, dt2_id, error) + * offset = 0 + * CALL h5tinsert_f(dt2_id, “b_name”, offset, H5T_NATIVE_REAL, error) + * ! + * CALL h5tcreate_f(H5T_COMPOUND_F, type_sized, dt3_id, error) + * offset = 0 + * CALL h5tinsert_f(dt3_id, “c_name”, offset, H5T_NATIVE_DOUBLE, error) + * ! + * ! Write data by fields in the datatype. Fields order + * ! is not important. + * ! + * CALL h5dwrite_f(dset_id, dt3_id, c, data_dims, error, xfer_prp = plist_id) + * CALL h5dwrite_f(dset_id, dt2_id, b, data_dims, error, xfer_prp = plist_id) + * CALL h5dwrite_f(dset_id, dt1_id, a, data_dims, error, xfer_prp = plist_id) + * \endcode + * + *

    Reading Datasets with Compound Datatypes

    + * + * Reading datasets with compound datatypes may be a challenge. For general applications there is + * no way to know a priori the corresponding C structure. Also, C structures cannot be allocated on + * the fly during discovery of the dataset’s datatype. For general C, C++, Fortran and Java + * application the following steps will be required to read and to interpret data from the dataset with + * compound datatype: + * \li 1. Get the identifier of the compound datatype in the file with the #H5Dget_type call + * \li 2. Find the number of the compound datatype members with the #H5Tget_nmembers call + * \li 3. Iterate through compound datatype members + *
    • Get member class with the #H5Tget_member_class call
    • + *
    • Get member name with the #H5Tget_member_name call
    • + *
    • Check class type against predefined classes + *
      • #H5T_INTEGER
      • + *
      • #H5T_FLOAT
      • + *
      • #H5T_STRING
      • + *
      • #H5T_BITFIELD
      • + *
      • #H5T_OPAQUE
      • + *
      • #H5T_COMPOUND
      • + *
      • #H5T_REFERENCE
      • + *
      • #H5T_ENUM
      • + *
      • #H5T_VLEN
      • + *
      • #H5T_ARRAY
      + *
    • + *
    • If class is #H5T_COMPOUND, then go to step 2 and repeat all steps under step 3. If + * class is not #H5T_COMPOUND, then a member is of an atomic class and can be read + * to a corresponding buffer after discovering all necessary information specific to each + * atomic type (for example, size of the integer or floats, super class for enumerated and + * array datatype, and its sizes)
    + * + * The examples below show how to read a dataset with a known compound datatype. + * + * The first example below shows the steps needed to read data of a known structure. First, build a + * memory datatype the same way it was built when the dataset was created, and then second use + * the datatype in an #H5Dread call. + * + * Read a dataset using a memory datatype + * \code + * typedef struct s1_t { + * int a; + * float b; + * double c; + * } s1_t; + * + * s1_t *data; + * + * ... + * + * s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); + * H5Tinsert(s1_tid, “a_name”, HOFFSET(s1_t, a), H5T_NATIVE_INT); + * H5Tinsert(s1_tid, “b_name”, HOFFSET(s1_t, b), H5T_NATIVE_FLOAT); + * H5Tinsert(s1_tid, “c_name”, HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); + * + * ... + * + * dataset_id = H5Dopen(file_id, “SDScompound.h5”, H5P_DEFAULT); + * + * ... + * + * data = (s1_t *) malloc (sizeof(s1_t)*LENGTH); + * H5Dread(dataset_id, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + * \endcode + * + * Instead of building a memory datatype, the application could use the + * #H5Tget_native_type function. See the example below. + * + * Read a dataset using H5Tget_native_type + * \code + * typedef struct s1_t { + * int a; + * float b; + * double c; + * } s1_t; + * + * s1_t *data; + * hid_t file_s1_t, mem_s1_t; + * + * ... + * + * dataset_id = H5Dopen(file_id, “SDScompound.h5”, H5P_DEFAULT); + * // Discover datatype in the file + * file_s1_t = H5Dget_type(dataset_id); + * // Find corresponding memory datatype + * mem_s1_t = H5Tget_native_type(file_s1_t, H5T_DIR_DEFAULT); + * + * ... + * + * data = (s1_t *) malloc (sizeof(s1_t)*LENGTH); + * H5Dread (dataset_id,mem_s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + * \endcode + * + * The example below shows how to read just one float member of a compound datatype. + * + * Read one floating point member of a compound datatype + * \code + * typedef struct sf_t { + * float b; + * } sf_t; + * + * sf_t *data; + * + * ... 
+ * + * sf_tid = H5Tcreate(H5T_COMPOUND, sizeof(sf_t)); + * H5Tinsert(sf_tid, “b_name”, HOFFSET(sf_t, b), H5T_NATIVE_FLOAT); + * + * ... + * + * dataset_id = H5Dopen(file_id, “SDScompound.h5”, H5P_DEFAULT); + * + * ... + * + * data = (sf_t *) malloc (sizeof(sf_t) * LENGTH); + * H5Dread(dataset_id, sf_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + * \endcode + * + * The example below shows how to read float and double members of a compound datatype into a + * structure that has those fields in a different order. Please notice that #H5Tinsert calls can be used + * in an order different from the order of the structure’s members. + * + * Read float and double members of a compound datatype + * \code + * typedef struct sdf_t { + * double c; + * float b; + * } sdf_t; + * + * sdf_t *data; + * + * ... + * + * sdf_tid = H5Tcreate(H5T_COMPOUND, sizeof(sdf_t)); + * H5Tinsert(sdf_tid, “b_name”, HOFFSET(sdf_t, b), H5T_NATIVE_FLOAT); + * H5Tinsert(sdf_tid, “c_name”, HOFFSET(sdf_t, c), H5T_NATIVE_DOUBLE); + * + * ... + * + * dataset_id = H5Dopen(file_id, “SDScompound.h5”, H5P_DEFAULT); + * + * ... + * + * data = (sdf_t *) malloc (sizeof(sdf_t) * LENGTH); + * H5Dread(dataset_id, sdf_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + * \endcode + * + *

    Array

    + * + * Many scientific datasets have multiple measurements for each point in a space. There are several + * natural ways to represent this data, depending on the variables and how they are used in + * computation. See the table and the figure below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Representing data with multiple measurements
    + *

    Storage Strategy

    + *
    + *

    Stored as

    + *
    + *

    Remarks

    + *
Multiple planes + * + * Several datasets with identical dataspaces + * + * This is optimal when variables are accessed individually, or when only selected + * variables are frequently used. + *
+ * Additional dimension + * + * One dataset, the last “dimension” is a vector of variables + * + * This can give good performance, although selecting only a few variables may be slow. This may + * not reflect the science. + *
    + * Record with multiple values + * + * One dataset with compound datatype + * + * This enables the variables to be read all together or selected. Also handles “vectors” of + * heterogeneous data. + *
    + * Vector or Tensor value + * + * One dataset, each data element is a small array of values. + * + * This uses the same amount of space as the previous two, and may represent the science model + * better. + *
    + * + * + * + * + * + * + * + * + * + * + * + *
    Figure 13 Representing data with multiple measurements
    + * \image html Dtypes_fig13a.gif + * + * \image html Dtypes_fig13b.gif + *
    + * \image html Dtypes_fig13c.gif + * + * \image html Dtypes_fig13d.gif + *
    + * + * The HDF5 #H5T_ARRAY datatype defines the data element to be a homogeneous, multi-dimensional array. + * See Figure 13 above. The elements of the array can be any HDF5 datatype + * (including compound and array), and the size of the datatype is the total size of the array. A + * dataset of array datatype cannot be subdivided for I/O within the data element: the entire array of + * the data element must be transferred. If the data elements need to be accessed separately, for + * example, by plane, then the array datatype should not be used. The table below shows + * advantages and disadvantages of various storage methods. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Storage method advantages and disadvantages
    + *

    Method

    + *
    + *

    Advantages

    + *
    + *

    Disadvantages

    + *
    + * Multiple Datasets + * + * Easy to access each plane, can select any plane(s) + * + * Less efficient to access a ‘column’ through the planes + *
    + * N+1 Dimension + * + * All access patterns supported + * + * Must be homogeneous datatype
    + * The added dimension may not make sense in the scientific model + *
    + * Compound Datatype + * + * Can be heterogeneous datatype + * + * Planes must be named, selection is by plane
    + * Not a natural representation for a matrix + *
    + * Array + * + * A natural representation for vector or tensor data + * + * Cannot access elements separately (no access by plane) + *
    + * + * An array datatype may be multi-dimensional with 1 to #H5S_MAX_RANK(the maximum rank + * of a dataset is currently 32) dimensions. The dimensions can be any size greater than 0, but + * unlimited dimensions are not supported (although the datatype can be a variable-length datatype). + * + * An array datatype is created with the #H5Tarray_create call, which specifies the number of + * dimensions, the size of each dimension, and the base type of the array. The array datatype can + * then be used in any way that any datatype object is used. The example below shows the creation + * of a datatype that is a two-dimensional array of native integers, and this is then used to create a + * dataset. Note that the dataset can be a dataspace that is any number and size of dimensions. The figure + * below shows the layout in memory assuming that the native integers are 4 bytes. Each + * data element has 6 elements, for a total of 24 bytes. + * + * Create a two-dimensional array datatype + * \code + * hid_t file, dataset; + * hid_t datatype, dataspace; + * hsize_t adims[] = {3, 2}; + * + * datatype = H5Tarray_create(H5T_NATIVE_INT, 2, adims, NULL); + * + * dataset = H5Dcreate(file, datasetname, datatype, + * dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * + * + * + * + * + *
    + * \image html Dtypes_fig14.gif "Memory layout of a two-dimensional array datatype" + *
    + * + * @anchor h4_vlen_datatype

    Variable-length Datatypes

+ * + * A variable-length (VL) datatype is a one-dimensional sequence of a datatype which is not fixed + * in length from one dataset location to another. In other words, each data element may have a + * different number of members. Variable-length datatypes cannot be divided; the entire data + * element must be transferred. + * + * VL datatypes are useful to the scientific community in many different ways, possibly including: + *
      + *
    • Ragged arrays: Multi-dimensional ragged arrays can be implemented with the last (fastest changing) + * dimension being ragged by using a VL datatype as the type of the element stored. + *
    • + *
    • Fractal arrays: A nested VL datatype can be used to implement ragged arrays of ragged arrays, to + * whatever nesting depth is required for the user. + *
    • + *
    • Polygon lists: A common storage requirement is to efficiently store arrays of polygons with + * different numbers of vertices. A VL datatype can be used to efficiently and succinctly describe + * an array of polygons with different numbers of vertices. + *
    • + *
    • Character strings: Perhaps the most common use of VL datatypes will be to store C-like VL + * character strings in dataset elements or as attributes of objects. + *
    • + *
    • Indices (for example, of objects within a file): An array of VL object references could be used + * as an index to all the objects in a file which contain a particular sequence of dataset values. + *
    • + *
    • Object Tracking: An array of VL dataset region references can be used as a method of tracking + * objects or features appearing in a sequence of datasets. + *
    • + *
    + * + * A VL datatype is created by calling #H5Tvlen_create which specifies the base datatype. The first + * example below shows an example of code that creates a VL datatype of unsigned integers. Each + * data element is a one-dimensional array of zero or more members and is stored in the + * hvl_t structure. See the second example below. + * + * Create a variable-length datatype of unsigned integers + * \code + * tid1 = H5Tvlen_create (H5T_NATIVE_UINT); + * + * dataset=H5Dcreate(fid1,“Dataset1”, tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * + * Data element storage for members of the VL datatype + * \code + * typedef struct + * { + * size_t len; // Length of VL data + * //(in base type units) + * void *p; // Pointer to VL data + * } hvl_t; + * \endcode + * + * The first example below shows how the VL data is written. For each of the 10 data elements, a + * length and data buffer must be allocated. Below the two examples is a figure that shows how the + * data is laid out in memory. + * + * An analogous procedure must be used to read the data. See the second example below. An + * appropriate array of vl_t must be allocated, and the data read. It is then traversed one data + * element at a time. The #H5Dvlen_reclaim call frees the data buffer for the buffer. With each + * element possibly being of different sequence lengths for a dataset with a VL datatype, the + * memory for the VL datatype must be dynamically allocated. Currently there are two methods of + * managing the memory for VL datatypes: the standard C malloc/free memory allocation routines + * or a method of calling user-defined memory management routines to allocate or free memory + * (set with #H5Pset_vlen_mem_manager). Since the memory allocated when reading (or writing) + * may be complicated to release, the #H5Dvlen_reclaim function is provided to traverse a memory + * buffer and free the VL datatype information without leaking memory. 
+ * + * Write VL data + * \code + * hvl_t wdata[10]; // Information to write + * + * // Allocate and initialize VL data to write + * for(i = 0; i < 10; i++) { + * wdata[i].p = malloc((i + 1) * sizeof(unsigned int)); + * wdata[i].len = i + 1; + * for(j = 0; j < (i + 1); j++) + * ((unsigned int *)wdata[i].p)[j]=i * 10 + j; + * } + * ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); + * \endcode + * + * Read VL data + * \code + * hvl_t rdata[SPACE1_DIM1]; + * ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + * + * for(i = 0; i < SPACE1_DIM1; i++) { + * printf(“%d: len %d ”,rdata[i].len); + * for(j = 0; j < rdata[i].len; j++) { + * printf(“ value: %u\n”,((unsigned int *)rdata[i].p)[j]); + * } + * } + * ret = H5Dvlen_reclaim(tid1, sid1, xfer_pid, rdata); + * \endcode + * + * + * + * + * + *
    + * \image html Dtypes_fig15.gif "Memory layout of a VL datatype" + *
    + * + * The user program must carefully manage these relatively complex data structures. The + * #H5Dvlen_reclaim function performs a standard traversal, freeing all the data. This function + * analyzes the datatype and dataspace objects, and visits each VL data element, recursing through + * nested types. By default, the system free is called for the pointer in each vl_t. Obviously, this + * call assumes that all of this memory was allocated with the system malloc. + * + * The user program may specify custom memory manager routines, one for allocating and one for + * freeing. These may be set with the #H5Pset_vlen_mem_manager, and must have the following + * prototypes: + *
      + *
    • + * \code + * typedef void *(*H5MM_allocate_t)(size_t size, void *info); + * \endcode + *
    • + *
    • + * \code + * typedef void (*H5MM_free_t)(void *mem, void *free_info); + * \endcode + *
    • + *
    + * The utility function #H5Dvlen_get_buf_size checks the number of bytes required to store the VL + * data from the dataset. This function analyzes the datatype and dataspace object to visit all the VL + * data elements, to determine the number of bytes required to store the data for the in the + * destination storage (memory). The size value is adjusted for data conversion and alignment in the + * destination. + * + * \subsection subsec_datatype_other Other Non-numeric Datatypes + * Several datatype classes define special types of objects. + * + * \subsubsection subsubsec_datatype_other_strings Strings + * Text data is represented by arrays of characters, called strings. Many programming languages + * support different conventions for storing strings, which may be fixed or variable-length, and may + * have different rules for padding unused storage. HDF5 can represent strings in several ways. See + * the figure below. + * + * The strings to store are “Four score” and “lazy programmers.” + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    A string stored as one-character elements in a one-dimensional array
    + * a) #H5T_NATIVE_CHAR: The dataset is a one-dimensional array with 29 elements, and each element + * is a single character. + *
    + * \image html Dtypes_fig16a.gif + *
    + * b) Fixed-length string: The dataset is a one-dimensional array with two elements, and each + * element is 20 characters. + *
    + * \image html Dtypes_fig16b.gif + *
    + * c) Variable-length string: The dataset is a one-dimensional array with two elements, and each + * element is a variable-length string. This is the same result when stored as a fixed-length + * string except that the first element of the array will need only 11 bytes for storage instead of 20. + *
    + * \image html Dtypes_fig16c.gif + *
    + * \image html Dtypes_fig16d.gif + *
    + * + * First, a dataset may have a dataset with datatype #H5T_NATIVE_CHAR with each character of + * the string as an element of the dataset. This will store an unstructured block of text data, but + * gives little indication of any structure in the text. See item a in the figure above. + * + * A second alternative is to store the data using the datatype class #H5T_STRING with each + * element a fixed length. See item b in the figure above. In this approach, each element might be a + * word or a sentence, addressed by the dataspace. The dataset reserves space for the specified + * number of characters, although some strings may be shorter. This approach is simple and usually + * is fast to access, but can waste storage space if the length of the Strings varies. + * + * A third alternative is to use a variable-length datatype. See item c in the figure above. This can + * be done using the standard mechanisms described above. The program would use vl_t structures + * to write and read the data. + * + * A fourth alternative is to use a special feature of the string datatype class to set the size of the + * datatype to #H5T_VARIABLE. See item c in the figure above. The example below shows a + * declaration of a datatype of type #H5T_C_S1 which is set to #H5T_VARIABLE. The HDF5 + * Library automatically translates between this and the vl_t structure. Note: the #H5T_VARIABLE + * size can only be used with string datatypes. + * Set the string datatype size to H5T_VARIABLE + * \code + * tid1 = H5Tcopy (H5T_C_S1); + * ret = H5Tset_size (tid1, H5T_VARIABLE); + * \endcode + * + * Variable-length strings can be read into C strings (in other words, pointers to zero terminated + * arrays of char). See the example below. 
+ * Read variable-length strings into C strings + * \code + * char *rdata[SPACE1_DIM1]; + * + * ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); + * + * for(i = 0; i < SPACE1_DIM1; i++) { + * printf(“%d: len: %d, str is: %s\n”, i, strlen(rdata[i]), rdata[i]); + * } + * + * ret = H5Dvlen_reclaim(tid1, sid1, xfer_pid, rdata); + * \endcode + * + * \subsubsection subsubsec_datatype_other_refs Reference + * In HDF5, objects (groups, datasets, and committed datatypes) are usually accessed by name. + * There is another way to access stored objects - by reference. There are two reference datatypes: + * object reference and region reference. Object reference objects are created with #H5Rcreate and + * other calls (cross reference). These objects can be stored and retrieved in a dataset as elements + * with reference datatype. The first example below shows an example of code that creates + * references to four objects, and then writes the array of object references to a dataset. The second + * example below shows a dataset of datatype reference being read and one of the reference objects + * being dereferenced to obtain an object pointer. + * + * In order to store references to regions of a dataset, the datatype should be #H5T_STD_REF_DSETREG. + * Note that a data element must be either an object reference or a region reference: these are + * different types and cannot be mixed within a single array. + * + * A reference datatype cannot be divided for I/O: an element is read or written completely. 
+ * + * Create object references and write to a dataset + * \code + * dataset= H5Dcreate (fid1, “Dataset3”, H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * + * // Create reference to dataset + * ret = H5Rcreate(&wbuf[0], fid1,“/Group1/Dataset1”, H5R_OBJECT, -1); + * + * // Create reference to dataset + * ret = H5Rcreate(&wbuf[1], fid1, “/Group1/Dataset2”, H5R_OBJECT, -1); + * + * // Create reference to group + * ret = H5Rcreate(&wbuf[2], fid1, “/Group1”, H5R_OBJECT, -1); + * + * // Create reference to committed datatype + * ret = H5Rcreate(&wbuf[3], fid1, “/Group1/Datatype1”, H5R_OBJECT, -1); + * + * // Write selection to disk + * ret=H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); + * \endcode + * + * Read a dataset with a reference datatype + * \code + * rbuf = malloc(sizeof(hobj_ref_t)*SPACE1_DIM1); + * + * // Read selection from disk + * ret=H5Dread(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); + * + * // Open dataset object + * dset2 = H5Rdereference(dataset, H5R_OBJECT, &rbuf[0]); + * \endcode + * + * \subsubsection subsubsec_datatype_other_enum ENUM + * The enum datatype implements a set of (name, value) pairs, similar to C/C++ enum. The values + * are currently limited to native integer datatypes. Each name can be the name of only one value, + * and each value can have only one name. + * + * The data elements of the ENUMERATION are stored according to the datatype. An example + * would be as an array of integers. The example below shows an example of how to create an + * enumeration with five elements. The elements map symbolic names to 2-byte integers. See the + * table below. 
+ * Create an enumeration with five elements + * \code + * hid_t hdf_en_colors; + * short val; + * + * hdf_en_colors = H5Tcreate(H5T_ENUM, sizeof(short)); + * H5Tenum_insert(hdf_en_colors, “RED”, (val=0, &val)); + * H5Tenum_insert(hdf_en_colors, “GREEN”, (val=1, &val)); + * H5Tenum_insert(hdf_en_colors, “BLUE”, (val=2, &val)); + * H5Tenum_insert(hdf_en_colors, “WHITE”, (val=3, &val)); + * H5Tenum_insert(hdf_en_colors, “BLACK”, (val=4, &val)); + * H5Dcreate(fileid, datasetname, hdf_en_colors, spaceid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    An enumeration with five elements
    NameValue
    RED0
    GREEN1
    BLUE2
    WHITE3
    BLACK4
    + * + * The figure below shows how an array of eight values might be stored. Conceptually, the array is + * an array of symbolic names [BLACK, RED, WHITE, BLUE, ...] See item a in the figure below. + * These are stored as the values and are short integers. So, the first 2 bytes are the value associated + * with “BLACK”, which is the number 4, and so on. See item b in the figure below. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Storing an enum array
    + * a) Logical data to be written - eight elements + *
    + * \image html Dtypes_fig17a.gif + *
    + * b) The storage layout. Total size of the array is 16 bytes, 2 bytes per element. + *
    + * \image html Dtypes_fig17b.gif + *
    + * + * The order that members are inserted into an enumeration type is unimportant; the important part + * is the associations between the symbol names and the values. Thus, two enumeration datatypes + * will be considered equal if and only if both types have the same symbol/value associations and + * both have equal underlying integer datatypes. Type equality is tested with the H5Tequal + * function. + * + * If a particular architecture type is required, a little-endian or big-endian datatype for example, + * use a native integer datatype as the ENUM base datatype and use #H5Tconvert on values as they + * are read from or written to a dataset. + * + * \subsubsection subsubsec_datatype_other_opaque Opaque + * In some cases, a user may have data objects that should be stored and retrieved as blobs with no + * attempt to interpret them. For example, an application might wish to store an array of encrypted + * certificates which are 100 bytes long. + * + * While an arbitrary block of data may always be stored as bytes, characters, integers, or whatever, + * this might mislead programs about the meaning of the data. The opaque datatype defines data + * elements which are uninterpreted by HDF5. The opaque data may be labeled with + * #H5Tset_tag with a string that might be used by an application. For example, the encrypted + * certificates might have a tag to indicate the encryption and the certificate standard. + * + * \subsubsection subsubsec_datatype_other_bitfield Bitfield + * Some data is represented as bits, where the number of bits is not an integral byte and the bits are + * not necessarily interpreted as a standard type. Some examples might include readings from + * machine registers (for example, switch positions), a cloud mask, or data structures with several + * small integers that should be store in a single byte. + * + * This data could be stored as integers, strings, or enumerations. 
However, these storage methods + * would likely result in considerable wasted space. For example, storing a cloud mask with one + * byte per value would use up to eight times the space of a packed array of bits. + * + * The HDF5 bitfield datatype class defines a data element that is a contiguous sequence of bits, + * which are stored on disk in a packed array. The programming model is the same as for unsigned + * integers: the datatype object is created by copying a predefined datatype, and then the precision, + * offset, and padding are set. + * + * While the use of the bitfield datatype will reduce storage space substantially, there will still be + * wasted space if the bitfield as a whole does not match the 1-, 2-, 4-, or 8-byte unit in which it is + * written. The remaining unused space can be removed by applying the N-bit filter to the dataset + * containing the bitfield data. For more information, see "Using the N-bit Filter." + * + * \subsection subsec_datatype_fill Fill Values + * The “fill value” for a dataset is the specification of the default value assigned to data elements + * that have not yet been written. In the case of a dataset with an atomic datatype, the fill value is a + * single value of the appropriate datatype, such as ‘0’ or ‘-1.0’. In the case of a dataset with a + * composite datatype, the fill value is a single data element of the appropriate type. For example, + * for an array or compound datatype, the fill value is a single data element with values for all the + * component elements of the array or compound datatype. + * + * The fill value is set (permanently) when the dataset is created. The fill value is set in the dataset + * creation properties in the #H5Dcreate call. Note that the #H5Dcreate call must also include the + * datatype of the dataset, and the value provided for the fill value will be interpreted as a single + * element of this datatype. 
The example below shows code which creates a dataset of integers with + * fill value -1. Any unwritten data elements will be set to -1. + * + * Create a dataset with a fill value of -1 + * \code + * hid_t plist_id; + * int filler; + * + * filler = -1; + * plist_id = H5Pcreate(H5P_DATASET_CREATE); + * H5Pset_fill_value(plist_id, H5T_NATIVE_INT, &filler); + * + * // Create the dataset with fill value ‘-1’. + * dataset_id = H5Dcreate(file_id, “/dset”, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, plist_id, + * H5P_DEFAULT); + * \endcode + * + * Create a fill value for a compound datatype + * \code + * typedef struct s1_t { + * int a; + * char b; + * double c; + * } s1_t; + * s1_t filler; + * + * s1_tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t)); + * H5Tinsert(s1_tid, “a_name”, HOFFSET(s1_t, a), H5T_NATIVE_INT); + * H5Tinsert(s1_tid, “b_name”, HOFFSET(s1_t, b), H5T_NATIVE_CHAR); + * H5Tinsert(s1_tid, “c_name”, HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); + * + * filler.a = -1; + * filler.b = ‘*’; + * filler.c = -2.0; + * plist_id = H5Pcreate(H5P_DATASET_CREATE); + * H5Pset_fill_value(plist_id, s1_tid, &filler); + * + * // Create the dataset with fill value + * // (-1, ‘*’, -2.0). + * dataset = H5Dcreate(file, datasetname, s1_tid, space, H5P_DEFAULT, plist_id, H5P_DEFAULT); + * \endcode + * + * The code above shows how to create a fill value for a compound datatype. The procedure is the + * same as the previous example except the filler must be a structure with the correct fields. Each + * field is initialized to the desired fill value. + * + * The fill value for a dataset can be retrieved by reading the dataset creation properties of the + * dataset and then by reading the fill value with #H5Pget_fill_value. The data will be read into + * memory using the storage layout specified by the datatype. This transfer will convert data in the + * same way as #H5Dread. 
The example below shows how to get the fill value from the dataset + * created in the example "Create a dataset with a fill value of -1". + * + * Retrieve a fill value + * \code + * hid_t plist2; + * int filler; + * + * dataset_id = H5Dopen(file_id, “/dset”, H5P_DEFAULT); + * plist2 = H5Dget_create_plist(dataset_id); + * + * H5Pget_fill_value(plist2, H5T_NATIVE_INT, &filler); + * + * // filler has the fill value, ‘-1’ + * \endcode + * + * A similar procedure is followed for any datatype. The example below shows how to read the fill + * value for the compound datatype created in an example above. Note that the program must pass + * an element large enough to hold a fill value of the datatype indicated by the argument to + * #H5Pget_fill_value. Also, the program must understand the datatype in order to interpret its + * components. This may be difficult to determine without knowledge of the application that + * created the dataset. + * + * Read the fill value for a compound datatype + * \code + * char *fillbuf; + * int sz; + * + * dataset = H5Dopen( file, DATASETNAME, H5P_DEFAULT); + * + * s1_tid = H5Dget_type(dataset); + * + * sz = H5Tget_size(s1_tid); + * + * fillbuf = (char *)malloc(sz); + * + * plist_id = H5Dget_create_plist(dataset); + * + * H5Pget_fill_value(plist_id, s1_tid, fillbuf); + * + * printf(“filler.a: %d\n”,((s1_t *) fillbuf)->a); + * printf(“filler.b: %c\n”,((s1_t *) fillbuf)->b); + * printf(“filler.c: %f\n”,((s1_t *) fillbuf)->c); + * \endcode + * + * \subsection subsec_datatype_complex Complex Combinations of Datatypes + * Several composite datatype classes define collections of other datatypes, including other + * composite datatypes. In general, a datatype can be nested to any depth, with any combination of + * datatypes. + * + * For example, a compound datatype can have members that are other compound datatypes, arrays, + * VL datatypes. An array can be an array of array, an array of compound, or an array of VL. 
And a + * VL datatype can be a variable-length array of compound, array, or VL datatypes. + * + * These complicated combinations of datatypes form a logical tree, with a single root datatype, and + * leaves which must be atomic datatypes (predefined or user-defined). The figure below shows an + * example of a logical tree describing a compound datatype constructed from different datatypes. + * + * Recall that the datatype is a description of the layout of storage. The complicated compound + * datatype is constructed from component datatypes, each of which describes the layout of part of + * the storage. Any datatype can be used as a component of a compound datatype, with the + * following restrictions: + *
    • 1. No byte can be part of more than one component datatype (in other words, the fields cannot + * overlap within the compound datatype)
    • + *
    • 2. The total size of the components must be less than or equal to the total size of the compound + * datatype
    + * These restrictions are essentially the rules for C structures and similar record types familiar from + * programming languages. Multiple typing, such as a C union, is not allowed in HDF5 datatypes. + * + * + * + * + * + *
    + * \image html Dtypes_fig18.gif "A compound datatype built with different datatypes" + *
    + * + * \subsubsection subsubsec_datatype_complex_create Creating a Complicated Compound Datatype + * To construct a complicated compound datatype, each component is constructed, and then added + * to the enclosing datatype description. The example below shows how to create a compound + * datatype with four members: + * \li “T1”, a compound datatype with three members + * \li “T2”, a compound datatype with two members + * \li “T3”, a one-dimensional array of integers + * \li “T4”, a string + * + * Below the example code is a figure that shows this datatype as a logical tree. The output of the + * h5dump utility is shown in the example below the figure. + * + * Each datatype is created as a separate datatype object. Figure "The storage layout for the + * four member datatypes" below shows the storage layout + * for the four individual datatypes. Then the datatypes are inserted into the outer datatype at an + * appropriate offset. Figure "The storage layout of the combined four members" below shows the + * resulting storage layout. The combined record is 89 bytes long. + * + * The Dataset is created using the combined compound datatype. The dataset is declared to be a 4 + * by 3 array of compound data. Each data element is an instance of the 89-byte compound + * datatype. Figure "The layout of the dataset" below shows the layout of the dataset, and expands + * one of the elements to show the relative position of the component data elements. + * + * Each data element is a compound datatype, which can be written or read as a record, or each + * field may be read or written individually. The first field (“T1”) is itself a compound datatype + * with three fields (“T1.a”, “T1.b”, and “T1.c”). “T1” can be read or written as a record, or + * individual fields can be accessed. Similarly, the second filed is a compound datatype with two + * fields (“T2.f1”, “T2.f2”). + * + * The third field (“T3”) is an array datatype. 
Thus, “T3” should be accessed as an array of 40 + * integers. Array data can only be read or written as a single element, so all 40 integers must be + * read or written to the third field. The fourth field (“T4”) is a single string of length 25. + * + * Create a compound datatype with four members + * \code + * typedef struct s1_t { + * int a; + * char b; + * double c; + * } s1_t; + * typedef struct s2_t { + * float f1; + * float f2; + * } s2_t; + * hid_t s1_tid, s2_tid, s3_tid, s4_tid, s5_tid; + * + * // Create a datatype for s1 + * s1_tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t)); + * H5Tinsert(s1_tid, “a_name”, HOFFSET(s1_t, a), H5T_NATIVE_INT); + * H5Tinsert(s1_tid, “b_name”, HOFFSET(s1_t, b), H5T_NATIVE_CHAR); + * H5Tinsert(s1_tid, “c_name”, HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); + * + * // Create a datatype for s2. + * s2_tid = H5Tcreate (H5T_COMPOUND, sizeof(s2_t)); + * H5Tinsert(s2_tid, “f1”, HOFFSET(s2_t, f1), H5T_NATIVE_FLOAT); + * H5Tinsert(s2_tid, “f2”, HOFFSET(s2_t, f2), H5T_NATIVE_FLOAT); + * + * // Create a datatype for an Array of integers + * s3_tid = H5Tarray_create(H5T_NATIVE_INT, RANK, dim); + * + * // Create a datatype for a String of 25 characters + * s4_tid = H5Tcopy(H5T_C_S1); + * H5Tset_size(s4_tid, 25); + * + * // Create a compound datatype composed of one of each of these types. + * // The total size is the sum of the size of each. + * sz = H5Tget_size(s1_tid) + H5Tget_size(s2_tid) + H5Tget_size(s3_tid) + H5Tget_size(s4_tid); + * s5_tid = H5Tcreate (H5T_COMPOUND, sz); + * + * // Insert the component types at the appropriate offsets. + * H5Tinsert(s5_tid, “T1”, 0, s1_tid); + * H5Tinsert(s5_tid, “T2”, sizeof(s1_t), s2_tid); + * H5Tinsert(s5_tid, “T3”, sizeof(s1_t) + sizeof(s2_t), s3_tid); + * H5Tinsert(s5_tid, “T4”, (sizeof(s1_t) + sizeof(s2_t) + H5Tget_size(s3_tid)), s4_tid); + * + * // Create the dataset with this datatype. 
+ * dataset = H5Dcreate(file, DATASETNAME, s5_tid, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * + * + * + * + * + *
    + * \image html Dtypes_fig19.gif "Logical tree for the compound datatype with four members" + *
    + * + * Output from h5dump for the compound datatype + * \code + * DATATYPE H5T_COMPOUND { + * H5T_COMPOUND { + * H5T_STD_I32LE “a_name”; + * H5T_STD_I8LE “b_name”; + * H5T_IEEE_F64LE “c_name”; + * } “T1”; + * H5T_COMPOUND { + * H5T_IEEE_F32LE “f1”; + * H5T_IEEE_F32LE “f2”; + * } “T2”; + * H5T_ARRAY { [10] H5T_STD_I32LE } “T3”; + * H5T_STRING { + * STRSIZE 25; + * STRPAD H5T_STR_NULLTERM; + * CSET H5T_CSET_ASCII; + * CTYPE H5T_C_S1; + * } “T4”; + * } + * \endcode + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    The storage layout for the four member datatypes
    + * a) Compound type ‘s1_t’, size 16 bytes. + *
    + * \image html Dtypes_fig20a.gif + *
    + * b) Compound type ‘s2_t’, size 8 bytes. + *
    + * \image html Dtypes_fig20b.gif + *
    + * c) Array type ‘s3_tid’, 40 integers, total size 40 bytes. + *
    + * \image html Dtypes_fig20c.gif + *
    + * d) String type ‘s4_tid’, size 25 bytes. + *
    + * \image html Dtypes_fig20d.gif + *
    + * + * + * + * + * + *
    + * \image html Dtypes_fig21.gif "The storage layout of the combined four members" + *
    + * + * \li A 4 x 3 array of Compound Datatype + * \li Element [1,1] expanded + * + * + * + * + *
    + * \image html Dtypes_fig22.gif "The layout of the dataset" + *
    + * + * \subsubsection subsubsec_datatype_complex_analyze Analyzing and Navigating a Compound Datatype + * A complicated compound datatype can be analyzed piece by piece to discover the exact storage + * layout. In the example above, the outer datatype is analyzed to discover that it is a compound + * datatype with four members. Each member is analyzed in turn to construct a complete map of the + * storage layout. + * + * The example below shows an example of code that partially analyzes a nested compound + * datatype. The name and overall offset and size of the component datatype is discovered, and then + * its type is analyzed depending on the datatype class. Through this method, the complete storage + * layout can be discovered. + * + * Output from h5dump for the compound datatype + * \code + * s1_tid = H5Dget_type(dataset); + * + * if (H5Tget_class(s1_tid) == H5T_COMPOUND) { + * printf(“COMPOUND DATATYPE {\n”); + * sz = H5Tget_size(s1_tid); + * nmemb = H5Tget_nmembers(s1_tid); + * printf(“ %d bytes\n”,sz); + * printf(“ %d members\n”,nmemb); + * for (i =0; i < nmemb; i++) { + * s2_tid = H5Tget_member_type(s1_tid, i); + * if (H5Tget_class(s2_tid) == H5T_COMPOUND) { + * // recursively analyze the nested type. + * } + * else if (H5Tget_class(s2_tid) == H5T_ARRAY) { + * sz2 = H5Tget_size(s2_tid); + * printf(“ %s: NESTED ARRAY DATATYPE offset %d size %d + * {\n”, H5Tget_member_name(s1_tid, i), H5Tget_member_offset(s1_tid, i), sz2); + * H5Tget_array_dims(s2_tid, dim); + * s3_tid = H5Tget_super(s2_tid); + * // Etc., analyze the base type of the array + * } + * else { + * // analyze a simple type + * printf(“ %s: type code %d offset %d size %d\n”, H5Tget_member_name(s1_tid, i), + * H5Tget_class(s2_tid), H5Tget_member_offset(s1_tid, i), H5Tget_size(s2_tid)); + * } + * // and so on.... + * \endcode + * + * \subsection subsec_datatype_life Life Cycle of the Datatype Object + * Application programs access HDF5 datatypes through identifiers. 
Identifiers are obtained by + * creating a new datatype or by copying or opening an existing datatype. The identifier can be used + * until it is closed or until the library shuts down. See items a and b in the figure below. By default, + * a datatype is transient, and it disappears when it is closed. + * + * When a dataset or attribute is created (#H5Dcreate or #H5Acreate), its datatype is stored in the + * HDF5 file as part of the dataset or attribute object. See item c in the figure below. Once an object is + * created, its datatype cannot be changed or deleted. The datatype can be accessed by calling + * #H5Dget_type, #H5Aget_type, #H5Tget_super, or #H5Tget_member_type. See item d in the figure + * below. These calls return an identifier to a transient copy of the datatype of the dataset or + * attribute unless the datatype is a committed datatype. + * Note that when an object is created, the stored datatype is a copy of the transient datatype. If two + * objects are created with the same datatype, the information is stored in each object with the same + * effect as if two different datatypes were created and used. + * + * A transient datatype can be stored using #H5Tcommit in the HDF5 file as an independent, named + * object, called a committed datatype. Committed datatypes were formerly known as named + * datatypes. See item e in the figure below. Subsequently, when a committed datatype is opened + * with #H5Topen (item f), or is obtained with #H5Tget_member_type or similar call (item k), the return + * is an identifier to a transient copy of the stored datatype. The identifier can be used in the + * same way as other datatype identifiers except that the committed datatype cannot be modified. When a + * committed datatype is copied with #H5Tcopy, the return is a new, modifiable, transient datatype + * object (item f). 
+ * + * When an object is created using a committed datatype (#H5Dcreate, #H5Acreate), the stored + * datatype is used without copying it to the object. See item j in the figure below. In this case, if + * multiple objects are created using the same committed datatype, they all share the exact same + * datatype object. This saves space and makes clear that the datatype is shared. Note that a + * committed datatype can be shared by objects within the same HDF5 file, but not by objects in + * other files. For more information on copying committed datatypes to other HDF5 files, see the + * “Copying Committed Datatypes with H5Ocopy” topic in the “Additional Resources” chapter. + * + * A committed datatype can be deleted from the file by calling #H5Ldelete which replaces + * #H5Gunlink. See item i in the figure below. If one or more objects are still using the datatype, the + * committed datatype cannot be accessed with #H5Topen, but will not be removed from the file + * until it is no longer used. #H5Tget_member_type and similar calls will return a transient copy of the + * datatype. + * + * + * + * + * + *
    + * \image html Dtypes_fig23.gif "Life cycle of a datatype" + *
    + * + * Transient datatypes are initially modifiable. Note that when a datatype is copied or when it is + * written to the file (when an object is created) or the datatype is used to create a composite + * datatype, a copy of the current state of the datatype is used. If the datatype is then modified, the + * changes have no effect on datasets, attributes, or datatypes that have already been created. See + * the figure below. + * + * A transient datatype can be made read-only (#H5Tlock). Note that the datatype is still transient, + * and otherwise does not change. A datatype that is immutable is read-only but cannot be closed + * except when the entire library is closed. The predefined types such as #H5T_NATIVE_INT are + * immutable transient types. + * + * + * + * + * + *
    + * \image html Dtypes_fig24.gif "Transient datatype states: modifiable, read-only, and immutable" + *
    + * + * To create two or more datasets that share a common datatype, first commit the datatype, and then + * use that datatype to create the datasets. See the example below. + * Create a shareable datatype + * \code + * hid_t t1 = ...some transient type...; + * H5Tcommit (file, “shared_type”, t1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * hid_t dset1 = H5Dcreate (file, “dset1”, t1, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * hid_t dset2 = H5Dcreate (file, “dset2”, t1, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * + * hid_t dset1 = H5Dopen (file, “dset1”, H5P_DEFAULT); + * hid_t t2 = H5Dget_type (dset1); + * hid_t dset3 = H5Dcreate (file, “dset3”, t2, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * hid_t dset4 = H5Dcreate (file, “dset4”, t2, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + * \endcode + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Datatype APIs
    FunctionDescription
    + * \code + * hid_t H5Topen (hid_t location, const char *name) + * \endcode + * + * A committed datatype can be opened by calling this function, which returns a datatype identifier. + * The identifier should eventually be released by calling #H5Tclose() to release resources. The + * committed datatype returned by this function is read-only or a negative value is returned for failure. + * The location is either a file or group identifier. + *
    + * \code + * herr_t H5Tcommit (hid_t location, const char *name, hid_t type, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) + * \endcode + * + * A transient datatype (not immutable) can be written to a file and turned into a committed datatype by + * calling this function. The location is either a file or group identifier and when combined with name + * refers to a new committed datatype. + *
    + * \code + * htri_t H5Tcommitted (hid_t type) + * \endcode + * + * A type can be queried to determine if it is a committed type or a transient type. If this function + * returns a positive value then the type is committed. Datasets which return committed datatypes with + * #H5Dget_type() are able to share the datatype with other datasets in the same file. + *
    + * + * \subsection subsec_datatype_transfer Data Transfer: Datatype Conversion and Selection + * When data is transferred (write or read), the storage layout of the data elements may be different. + * For example, an integer might be stored on disk in big-endian byte order and read into memory + * with little-endian byte order. In this case, each data element will be transformed by the HDF5 + * Library during the data transfer. + * + * The conversion of data elements is controlled by specifying the datatype of the source and + * specifying the intended datatype of the destination. The storage format on disk is the datatype + * specified when the dataset is created. The datatype of memory must be specified in the library + * call. + * + * In order to be convertible, the datatype of the source and destination must have the same + * datatype class (with the exception of enumeration type). Thus, integers can be converted to other + * integers, and floats to other floats, but integers cannot (yet) be converted to floats. For each + * atomic datatype class, the possible conversions are defined. An enumeration datatype can be + * converted to an integer or a floating-point number datatype. + * + * Basically, any datatype can be converted to another datatype of the same datatype class. The + * HDF5 Library automatically converts all properties. If the destination is too small to hold the + * source value then an overflow or underflow exception occurs. If a handler is defined with the + * #H5Pset_type_conv_cb function, it will be called. Otherwise, a default action will be performed. + * The table below summarizes the default actions. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Default actions for datatype conversion exceptions
    Datatype ClassPossible ExceptionsDefault Action
    IntegerSize, offset, pad
    FloatSize, offset, pad, ebits
    StringSizeTruncates, zero terminate if required.
    EnumerationNo fieldAll bits set
    + * + * For example, when reading data from a dataset, the source datatype is the datatype set when the + * dataset was created, and the destination datatype is the description of the storage layout in + * memory. The destination datatype must be specified in the #H5Dread call. The example below + * shows an example of reading a dataset of 32-bit integers. The figure below the example shows + * the data transformation that is performed. + * Specify the destination datatype with H5Dread + * \code + * // Stored as H5T_STD_BE32 + * // Use the native memory order in the destination + * mem_type_id = H5Tcopy(H5T_NATIVE_INT); + * status = H5Dread(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf); + * \endcode + * + * + * + * + * + * + *
    Layout of a datatype conversion
    + * \image html Dtypes_fig25a.gif
    + * \image html Dtypes_fig25b.gif
    + * \image html Dtypes_fig25c.gif + *
    + * + * One thing to note in the example above is the use of the predefined native datatype + * #H5T_NATIVE_INT. Recall that in this example, the data was stored as a 4-bytes in big-endian + * order. The application wants to read this data into an array of integers in memory. Depending on + * the system, the storage layout of memory might be either big or little-endian, so the data may + * need to be transformed on some platforms and not on others. The #H5T_NATIVE_INT type is set + * by the HDF5 Library to be the correct type to describe the storage layout of the memory on the + * system. Thus, the code in the example above will work correctly on any platform, performing a + * transformation when needed. + * + * There are predefined native types for most atomic datatypes, and these can be combined in + * composite datatypes. In general, the predefined native datatypes should always be used for data + * stored in memory. + * Predefined native datatypes describe the storage properties of memory. + * + * + * + * + * + *
    + * \image html Dtypes_fig26.gif "An enum datatype conversion" + *
    + * + * Create an aligned and packed compound datatype + * \code + * // Stored as H5T_STD_BE32 + * // Use the native memory order in the destination + * mem_type_id = H5Tcopy(H5T_NATIVE_INT); + * status = H5Dread(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf); + * \endcode + * + * + * + * + * + *
    + * \image html Dtypes_fig27.gif "Alignment of a compound datatype" + *
    + * + * Transfer some fields of a compound datatype + * \code + * // Stored as H5T_STD_BE32 + * // Use the native memory order in the destination + * mem_type_id = H5Tcopy(H5T_NATIVE_INT); + * status = H5Dread(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf); + * \endcode + * + * + * + * + * + *
    + * \image html Dtypes_fig28.gif "Layout when an element is skipped" + *
    + * + * \subsection subsec_datatype_text Text Descriptions of Datatypes: Conversion to and from + * + * HDF5 provides a means for generating a portable and human-readable text description of a + * datatype and for generating a datatype from such a text description. This capability is particularly + * useful for creating complex datatypes in a single step, for creating a text description of a datatype + * for debugging purposes, and for creating a portable datatype definition that can then be used to + * recreate the datatype on many platforms or in other applications. + * + * These tasks are handled by two functions provided in the HDF5 Lite high-level library: + * \li #H5LTtext_to_dtype Creates an HDF5 datatype in a single step. + * \li #H5LTdtype_to_text Translates an HDF5 datatype into a text description. + * + * Note that this functionality requires that the HDF5 High-Level Library (H5LT) be installed. + * + * While #H5LTtext_to_dtype can be used to generate any sort of datatype, it is particularly useful + * for complex datatypes. + * + * #H5LTdtype_to_text is most likely to be used in two sorts of situations: when a datatype must be + * closely examined for debugging purpose or to create a portable text description of the datatype + * that can then be used to recreate the datatype on other platforms or in other applications. + * + * These two functions work for all valid HDF5 datatypes except time, bitfield, and reference + * datatypes. + * + * The currently supported text format used by #H5LTtext_to_dtype and #H5LTdtype_to_text is the + * data description language (DDL) and conforms to the \ref DDLBNF110. The portion of the + * \ref DDLBNF110 that defines HDF5 datatypes appears below. + * The definition of HDF5 datatypes from the HDF5 DDL + * \code + * ::= | | | + * + * ::= | |