From 794ba0a251af47b8e3c60afa2fe92d267e2a6b55 Mon Sep 17 00:00:00 2001 From: Frank Baker Date: Tue, 19 Jul 2005 12:28:56 -0500 Subject: [svn-r11084] Description: All HDF5 user documentation has been moved to a separate hdf5doc/ repository, managed under Subversion. With this 'cvs commit', all files are stripped from hdf5/doc/. THIS CHANGE IS APPLIED ONLY TO THE HDF5 DEVELOPMENT BRANCH, post Release 1.6.x; it is not applied to the release branches. --- doc/Makefile.am | 23 - doc/Makefile.in | 619 -- doc/html/ADGuide.html | 266 - doc/html/ADGuide/Changes.html | 1086 --- doc/html/ADGuide/H4toH5Mapping.doc | Bin 238592 -> 0 bytes doc/html/ADGuide/H4toH5Mapping.pdf | Bin 823200 -> 0 bytes doc/html/ADGuide/HISTORY.txt | 3180 ------- doc/html/ADGuide/ImageSpec.html | 1279 --- doc/html/ADGuide/Makefile.am | 18 - doc/html/ADGuide/Makefile.in | 487 - doc/html/ADGuide/PaletteExample1.gif | Bin 2731 -> 0 bytes doc/html/ADGuide/Palettes.fm.anc.gif | Bin 4748 -> 0 bytes doc/html/ADGuide/RELEASE.txt | 906 -- doc/html/Attributes.html | 287 - doc/html/Big.html | 122 - doc/html/Caching.html | 190 - doc/html/Chunk_f1.gif | Bin 3664 -> 0 bytes doc/html/Chunk_f1.obj | 252 - doc/html/Chunk_f2.gif | Bin 3986 -> 0 bytes doc/html/Chunk_f2.obj | 95 - doc/html/Chunk_f3.gif | Bin 6815 -> 0 bytes doc/html/Chunk_f4.gif | Bin 5772 -> 0 bytes doc/html/Chunk_f5.gif | Bin 5455 -> 0 bytes doc/html/Chunk_f6.gif | Bin 4949 -> 0 bytes doc/html/Chunk_f6.obj | 107 - doc/html/Chunking.html | 313 - doc/html/CodeReview.html | 300 - doc/html/Coding.html | 300 - doc/html/Copyright.html | 121 - doc/html/Datasets.html | 954 -- doc/html/Dataspaces.html | 742 -- doc/html/Datatypes.html | 3114 ------- doc/html/DatatypesEnum.html | 926 -- doc/html/Debugging.html | 516 -- doc/html/EnumMap.gif | Bin 1682 -> 0 bytes doc/html/Environment.html | 166 - doc/html/Errors.html | 386 - doc/html/ExternalFiles.html | 279 - doc/html/FF-IH_FileGroup.gif | Bin 3407 -> 0 bytes doc/html/FF-IH_FileObject.gif | Bin 2136 -> 0 bytes doc/html/Files.html | 607 -- doc/html/Filters.html | 593 -- doc/html/Glossary.html | 573 -- doc/html/Graphics/C++.gif | Bin 147 -> 0 bytes doc/html/Graphics/FORTRAN.gif | Bin 194 -> 0 bytes doc/html/Graphics/Java.gif | Bin 161 -> 0 bytes doc/html/Graphics/Makefile.am | 17 - doc/html/Graphics/Makefile.in | 485 - doc/html/Graphics/OtherAPIs.gif | Bin 185 -> 0 bytes doc/html/Groups.html | 404 - doc/html/H5.api_map.html | 849 -- doc/html/H5.format.html | 5956 ------------ doc/html/H5.intro.html | 3161 ------- doc/html/H5.sample_code.html | 123 - doc/html/H5.user.PrintGen.html | 132 - doc/html/H5.user.PrintTpg.html | 79 - doc/html/H5.user.html | 243 - doc/html/IH_map1.gif | Bin 2560 -> 0 bytes doc/html/IH_map2.gif | Bin 2560 -> 0 bytes doc/html/IH_map3.gif | Bin 3072 -> 0 bytes doc/html/IH_map4.gif | Bin 3072 -> 0 bytes doc/html/IH_mapFoot.gif | Bin 1024 -> 0 bytes doc/html/IH_mapHead.gif | Bin 2048 -> 0 bytes doc/html/IOPipe.html | 114 - doc/html/Intro/IntroExamples.html | 2128 ----- doc/html/Intro/Makefile.am | 17 - doc/html/Intro/Makefile.in | 485 - doc/html/Lib_Maint.html | 113 - doc/html/Makefile.am | 43 - doc/html/Makefile.in | 670 -- doc/html/MemoryManagement.html | 510 - doc/html/MountingFiles.html | 427 - doc/html/NCSAfooterlogo.gif | Bin 1818 -> 0 bytes doc/html/ObjectHeader.txt | 60 - doc/html/PSandPDF/Makefile.am | 16 - doc/html/PSandPDF/Makefile.in | 453 - doc/html/PSandPDF/process.txt | 218 - doc/html/Performance.html | 260 - doc/html/PredefDTypes.html | 516 -- doc/html/Properties.html | 185 - doc/html/RM_H5.html | 650 -- 
doc/html/RM_H5A.html | 954 -- doc/html/RM_H5D.html | 1584 ---- doc/html/RM_H5E.html | 1689 ---- doc/html/RM_H5F.html | 1970 ---- doc/html/RM_H5Front.html | 409 - doc/html/RM_H5G.html | 1521 --- doc/html/RM_H5I.html | 1187 --- doc/html/RM_H5P.html | 9783 -------------------- doc/html/RM_H5R.html | 543 -- doc/html/RM_H5S.html | 1884 ---- doc/html/RM_H5T.html | 4001 -------- doc/html/RM_H5Z.html | 655 -- doc/html/References.html | 651 -- doc/html/TechNotes.html | 319 - doc/html/TechNotes/Automake.html | 223 - doc/html/TechNotes/Basic_perform.html | 75 - doc/html/TechNotes/BigDataSmMach.html | 122 - doc/html/TechNotes/ChStudy_1000x1000.gif | Bin 6594 -> 0 bytes doc/html/TechNotes/ChStudy_250x250.gif | Bin 6914 -> 0 bytes doc/html/TechNotes/ChStudy_499x499.gif | Bin 10429 -> 0 bytes doc/html/TechNotes/ChStudy_5000x1000.gif | Bin 10653 -> 0 bytes doc/html/TechNotes/ChStudy_500x500.gif | Bin 6842 -> 0 bytes doc/html/TechNotes/ChStudy_p1.gif | Bin 6550 -> 0 bytes doc/html/TechNotes/ChStudy_p1.obj | 113 - doc/html/TechNotes/ChunkingStudy.html | 190 - doc/html/TechNotes/CodeReview.html | 300 - doc/html/TechNotes/Daily_Test_Explained.htm | 863 -- doc/html/TechNotes/DataTransformReport.htm | 877 -- doc/html/TechNotes/ExternalFiles.html | 279 - doc/html/TechNotes/FreeLists.html | 205 - doc/html/TechNotes/H4-H5Compat.html | 271 - doc/html/TechNotes/HeapMgmt.html | 84 - doc/html/TechNotes/IOPipe.html | 114 - doc/html/TechNotes/LibMaint.html | 128 - doc/html/TechNotes/Makefile.am | 25 - doc/html/TechNotes/Makefile.in | 494 - doc/html/TechNotes/MemoryMgmt.html | 510 - doc/html/TechNotes/MoveDStruct.html | 66 - doc/html/TechNotes/NamingScheme.html | 300 - doc/html/TechNotes/ObjectHeader.html | 72 - doc/html/TechNotes/RawDStorage.html | 274 - doc/html/TechNotes/ReservedFileSpace.html | 29 - doc/html/TechNotes/SWControls.html | 96 - doc/html/TechNotes/SymbolTables.html | 329 - doc/html/TechNotes/TestReview.html | 57 - doc/html/TechNotes/TestReview/H5Dget_offset.html | 199 - .../TechNotes/TestReview/H5Tget_native_type.html | 522 -- doc/html/TechNotes/ThreadSafeLibrary.html | 794 -- doc/html/TechNotes/VFL.html | 1543 --- doc/html/TechNotes/VFLfunc.html | 64 - doc/html/TechNotes/VLTypes.html | 150 - doc/html/TechNotes/Version.html | 137 - doc/html/TechNotes/openmp-hdf5.c | 403 - doc/html/TechNotes/openmp-hdf5.html | 67 - doc/html/TechNotes/pipe1.gif | Bin 10110 -> 0 bytes doc/html/TechNotes/pipe1.obj | 136 - doc/html/TechNotes/pipe2.gif | Bin 11715 -> 0 bytes doc/html/TechNotes/pipe2.obj | 168 - doc/html/TechNotes/pipe3.gif | Bin 6961 -> 0 bytes doc/html/TechNotes/pipe3.obj | 70 - doc/html/TechNotes/pipe4.gif | Bin 8355 -> 0 bytes doc/html/TechNotes/pipe4.obj | 92 - doc/html/TechNotes/pipe5.gif | Bin 6217 -> 0 bytes doc/html/TechNotes/pipe5.obj | 52 - doc/html/TechNotes/shuffling-algorithm-report.pdf | Bin 78850 -> 0 bytes doc/html/TechNotes/version.gif | Bin 4772 -> 0 bytes doc/html/TechNotes/version.obj | 96 - doc/html/Tools.html | 2760 ------ doc/html/Tutor/Contents.html | 104 - doc/html/Tutor/ContentsAdd.html | 54 - doc/html/Tutor/ContentsAdv.html | 57 - doc/html/Tutor/ContentsFull.html | 71 - doc/html/Tutor/ContentsIntro.html | 63 - doc/html/Tutor/Copyright.html | 117 - doc/html/Tutor/Graphics/AddInfo.gif | Bin 274 -> 0 bytes doc/html/Tutor/Graphics/AdvTopics.gif | Bin 240 -> 0 bytes doc/html/Tutor/Graphics/BLANK.gif | Bin 164 -> 0 bytes doc/html/Tutor/Graphics/ChunkExt.gif | Bin 333 -> 0 bytes doc/html/Tutor/Graphics/CompDTypes.gif | Bin 315 -> 0 bytes doc/html/Tutor/Graphics/Copy.gif | Bin 284 -> 0 bytes 
doc/html/Tutor/Graphics/CreateAttr.gif | Bin 272 -> 0 bytes doc/html/Tutor/Graphics/CreateDset1.gif | Bin 283 -> 0 bytes doc/html/Tutor/Graphics/CreateDset2.gif | Bin 280 -> 0 bytes doc/html/Tutor/Graphics/CreateFile.gif | Bin 269 -> 0 bytes doc/html/Tutor/Graphics/CreateGrp1.gif | Bin 270 -> 0 bytes doc/html/Tutor/Graphics/CreateGrp2.gif | Bin 270 -> 0 bytes doc/html/Tutor/Graphics/Examples.gif | Bin 297 -> 0 bytes doc/html/Tutor/Graphics/FileOrg.gif | Bin 273 -> 0 bytes doc/html/Tutor/Graphics/FullTOC1.gif | Bin 202 -> 0 bytes doc/html/Tutor/Graphics/FullTOC2.gif | Bin 206 -> 0 bytes doc/html/Tutor/Graphics/Glossary.gif | Bin 238 -> 0 bytes doc/html/Tutor/Graphics/H5API.gif | Bin 237 -> 0 bytes doc/html/Tutor/Graphics/Intro.gif | Bin 244 -> 0 bytes doc/html/Tutor/Graphics/IntroTopics.gif | Bin 256 -> 0 bytes doc/html/Tutor/Graphics/Iterate.gif | Bin 270 -> 0 bytes doc/html/Tutor/Graphics/Makefile.am | 24 - doc/html/Tutor/Graphics/Makefile.in | 493 - doc/html/Tutor/Graphics/MountFile.gif | Bin 254 -> 0 bytes doc/html/Tutor/Graphics/Quiz.gif | Bin 202 -> 0 bytes doc/html/Tutor/Graphics/QuizAns.gif | Bin 256 -> 0 bytes doc/html/Tutor/Graphics/RdWrDataset.gif | Bin 305 -> 0 bytes doc/html/Tutor/Graphics/RefObject.gif | Bin 292 -> 0 bytes doc/html/Tutor/Graphics/RefRegion.gif | Bin 296 -> 0 bytes doc/html/Tutor/Graphics/References.gif | Bin 240 -> 0 bytes doc/html/Tutor/Graphics/SelectElemCp.gif | Bin 307 -> 0 bytes doc/html/Tutor/Graphics/SelectHyp.gif | Bin 308 -> 0 bytes doc/html/Tutor/Graphics/TOC.gif | Bin 306 -> 0 bytes doc/html/Tutor/Graphics/TOCFull.gif | Bin 204 -> 0 bytes doc/html/Tutor/Graphics/TOCShort.gif | Bin 215 -> 0 bytes doc/html/Tutor/Graphics/TitlePg.gif | Bin 300 -> 0 bytes doc/html/Tutor/Graphics/Utilities.gif | Bin 222 -> 0 bytes doc/html/Tutor/Makefile.am | 25 - doc/html/Tutor/Makefile.in | 651 -- doc/html/Tutor/answers.html | 322 - doc/html/Tutor/api.html | 151 - doc/html/Tutor/bighdf2sp.JPG | Bin 8712 -> 0 bytes doc/html/Tutor/compound.html | 234 - doc/html/Tutor/crtatt.html | 343 - doc/html/Tutor/crtdat.html | 497 - doc/html/Tutor/crtfile.html | 317 - doc/html/Tutor/crtgrp.html | 202 - doc/html/Tutor/crtgrpar.html | 229 - doc/html/Tutor/crtgrpd.html | 163 - doc/html/Tutor/examples/Makefile.am | 38 - doc/html/Tutor/examples/Makefile.in | 530 -- doc/html/Tutor/examples/attrexample.f90 | 87 - doc/html/Tutor/examples/chunk.f90 | 310 - doc/html/Tutor/examples/compound.f90 | 215 - doc/html/Tutor/examples/dsetexample.f90 | 70 - doc/html/Tutor/examples/fileexample.f90 | 34 - doc/html/Tutor/examples/groupexample.f90 | 49 - doc/html/Tutor/examples/grpdsetexample.f90 | 136 - doc/html/Tutor/examples/grpit.f90 | 194 - doc/html/Tutor/examples/grpsexample.f90 | 68 - doc/html/Tutor/examples/h5_compound.c | 153 - doc/html/Tutor/examples/h5_copy.c | 148 - doc/html/Tutor/examples/h5_crtatt.c | 46 - doc/html/Tutor/examples/h5_crtdat.c | 34 - doc/html/Tutor/examples/h5_crtfile.c | 19 - doc/html/Tutor/examples/h5_crtgrp.c | 24 - doc/html/Tutor/examples/h5_crtgrpar.c | 32 - doc/html/Tutor/examples/h5_crtgrpd.c | 74 - doc/html/Tutor/examples/h5_extend.c | 141 - doc/html/Tutor/examples/h5_hyperslab.c | 192 - doc/html/Tutor/examples/h5_iterate.c | 111 - doc/html/Tutor/examples/h5_mount.c | 119 - doc/html/Tutor/examples/h5_rdwt.c | 37 - doc/html/Tutor/examples/h5_read.c | 136 - doc/html/Tutor/examples/h5_ref2objr.c | 93 - doc/html/Tutor/examples/h5_ref2objw.c | 120 - doc/html/Tutor/examples/h5_ref2regr.c | 119 - doc/html/Tutor/examples/h5_ref2regw.c | 112 - 
doc/html/Tutor/examples/h5_reference.c | 146 - doc/html/Tutor/examples/hyperslab.f90 | 199 - doc/html/Tutor/examples/java/Compound.java | 540 -- doc/html/Tutor/examples/java/Copy.java | 541 -- doc/html/Tutor/examples/java/CreateAttribute.java | 302 - doc/html/Tutor/examples/java/CreateDataset.java | 210 - doc/html/Tutor/examples/java/CreateFile.java | 83 - doc/html/Tutor/examples/java/CreateFileInput.java | 118 - doc/html/Tutor/examples/java/CreateGroup.java | 139 - doc/html/Tutor/examples/java/CreateGroupAR.java | 152 - .../Tutor/examples/java/CreateGroupDataset.java | 340 - doc/html/Tutor/examples/java/DatasetRdWt.java | 213 - doc/html/Tutor/examples/java/Dependencies | 0 doc/html/Tutor/examples/java/HyperSlab.java | 590 -- doc/html/Tutor/examples/java/Makefile | 92 - doc/html/Tutor/examples/java/Makefile.in | 91 - doc/html/Tutor/examples/java/README | 21 - doc/html/Tutor/examples/java/readme.html | 192 - doc/html/Tutor/examples/java/runCompound.sh | 17 - doc/html/Tutor/examples/java/runCompound.sh.in | 17 - doc/html/Tutor/examples/java/runCopy.sh | 17 - doc/html/Tutor/examples/java/runCopy.sh.in | 17 - doc/html/Tutor/examples/java/runCreateAttribute.sh | 17 - .../Tutor/examples/java/runCreateAttribute.sh.in | 17 - doc/html/Tutor/examples/java/runCreateDataset.sh | 17 - .../Tutor/examples/java/runCreateDataset.sh.in | 17 - doc/html/Tutor/examples/java/runCreateFile.sh | 17 - doc/html/Tutor/examples/java/runCreateFile.sh.in | 17 - doc/html/Tutor/examples/java/runCreateFileInput.sh | 17 - .../Tutor/examples/java/runCreateFileInput.sh.in | 17 - doc/html/Tutor/examples/java/runCreateGroup.sh | 17 - doc/html/Tutor/examples/java/runCreateGroup.sh.in | 17 - doc/html/Tutor/examples/java/runCreateGroupAR.sh | 17 - .../Tutor/examples/java/runCreateGroupAR.sh.in | 17 - .../Tutor/examples/java/runCreateGroupDataset.sh | 17 - .../examples/java/runCreateGroupDataset.sh.in | 17 - doc/html/Tutor/examples/java/runDatasetRdWt.sh | 17 - doc/html/Tutor/examples/java/runDatasetRdWt.sh.in | 17 - doc/html/Tutor/examples/java/runHyperSlab.sh | 17 - doc/html/Tutor/examples/java/runHyperSlab.sh.in | 17 - doc/html/Tutor/examples/mountexample.f90 | 187 - doc/html/Tutor/examples/refobjexample.f90 | 142 - doc/html/Tutor/examples/refregexample.f90 | 162 - doc/html/Tutor/examples/rwdsetexample.f90 | 78 - doc/html/Tutor/examples/selectele.f90 | 282 - doc/html/Tutor/extend.html | 284 - doc/html/Tutor/fileorg.html | 102 - doc/html/Tutor/footer-ncsalogo.gif | Bin 1405 -> 0 bytes doc/html/Tutor/glossary.html | 261 - doc/html/Tutor/img001.gif | Bin 635 -> 0 bytes doc/html/Tutor/img002.gif | Bin 954 -> 0 bytes doc/html/Tutor/img003.gif | Bin 928 -> 0 bytes doc/html/Tutor/img004.gif | Bin 1644 -> 0 bytes doc/html/Tutor/img005.gif | Bin 1812 -> 0 bytes doc/html/Tutor/index.html | 29 - doc/html/Tutor/intro.html | 92 - doc/html/Tutor/iterate.html | 290 - doc/html/Tutor/mount.html | 255 - doc/html/Tutor/property.html | 167 - doc/html/Tutor/questions.html | 159 - doc/html/Tutor/rdwt.html | 409 - doc/html/Tutor/references.html | 66 - doc/html/Tutor/reftoobj.html | 318 - doc/html/Tutor/reftoreg.html | 366 - doc/html/Tutor/select.html | 309 - doc/html/Tutor/selectc.html | 265 - doc/html/Tutor/software.html | 85 - doc/html/Tutor/title.html | 105 - doc/html/Tutor/util.html | 85 - doc/html/Version.html | 137 - doc/html/chunk1.gif | Bin 5111 -> 0 bytes doc/html/chunk1.obj | 52 - doc/html/compat.html | 271 - doc/html/cpplus/CppInterfaces.html | 1437 --- doc/html/cpplus/CppUserNotes.doc | Bin 136192 -> 0 bytes 
doc/html/cpplus/CppUserNotes.pdf | Bin 55301 -> 0 bytes doc/html/cpplus/Makefile.am | 17 - doc/html/cpplus/Makefile.in | 485 - doc/html/dataset_p1.gif | Bin 3359 -> 0 bytes doc/html/dataset_p1.obj | 32 - doc/html/ddl.html | 579 -- doc/html/ed_libs/Footer.lbi | 5 - doc/html/ed_libs/Makefile.am | 20 - doc/html/ed_libs/Makefile.in | 489 - doc/html/ed_libs/NavBar_ADevG.lbi | 18 - doc/html/ed_libs/NavBar_Common.lbi | 17 - doc/html/ed_libs/NavBar_Intro.lbi | 17 - doc/html/ed_libs/NavBar_RM.lbi | 39 - doc/html/ed_libs/NavBar_TechN.lbi | 27 - doc/html/ed_libs/NavBar_UG.lbi | 40 - doc/html/ed_libs/styles_Format.lbi | 18 - doc/html/ed_libs/styles_Gen.lbi | 18 - doc/html/ed_libs/styles_Index.lbi | 18 - doc/html/ed_libs/styles_Intro.lbi | 18 - doc/html/ed_libs/styles_RM.lbi | 19 - doc/html/ed_libs/styles_UG.lbi | 18 - doc/html/ed_styles/FormatElect.css | 35 - doc/html/ed_styles/FormatPrint.css | 58 - doc/html/ed_styles/GenElect.css | 35 - doc/html/ed_styles/GenPrint.css | 58 - doc/html/ed_styles/IndexElect.css | 35 - doc/html/ed_styles/IndexPrint.css | 58 - doc/html/ed_styles/IntroElect.css | 35 - doc/html/ed_styles/IntroPrint.css | 58 - doc/html/ed_styles/Makefile.am | 19 - doc/html/ed_styles/Makefile.in | 488 - doc/html/ed_styles/RMelect.css | 39 - doc/html/ed_styles/RMprint.css | 58 - doc/html/ed_styles/UGelect.css | 35 - doc/html/ed_styles/UGprint.css | 58 - doc/html/extern1.gif | Bin 1989 -> 0 bytes doc/html/extern1.obj | 40 - doc/html/extern2.gif | Bin 4054 -> 0 bytes doc/html/extern2.obj | 108 - doc/html/fortran/F90Flags.html | 332 - doc/html/fortran/F90UserNotes.html | 141 - doc/html/fortran/Makefile.am | 17 - doc/html/fortran/Makefile.in | 485 - doc/html/group_p1.gif | Bin 3696 -> 0 bytes doc/html/group_p1.obj | 85 - doc/html/group_p2.gif | Bin 3524 -> 0 bytes doc/html/group_p2.obj | 57 - doc/html/group_p3.gif | Bin 3354 -> 0 bytes doc/html/group_p3.obj | 59 - doc/html/h5s.examples | 347 - doc/html/hdf2.jpg | Bin 3034 -> 0 bytes doc/html/heap.txt | 72 - doc/html/index.html | 308 - doc/html/move.html | 66 - doc/html/ph5design.html | 77 - doc/html/ph5example.c | 1018 -- doc/html/ph5implement.txt | 27 - doc/html/pipe1.gif | Bin 10110 -> 0 bytes doc/html/pipe1.obj | 136 - doc/html/pipe2.gif | Bin 11715 -> 0 bytes doc/html/pipe2.obj | 168 - doc/html/pipe3.gif | Bin 6961 -> 0 bytes doc/html/pipe3.obj | 70 - doc/html/pipe4.gif | Bin 8355 -> 0 bytes doc/html/pipe4.obj | 92 - doc/html/pipe5.gif | Bin 6217 -> 0 bytes doc/html/pipe5.obj | 52 - doc/html/review1.html | 283 - doc/html/review1a.html | 252 - doc/html/storage.html | 274 - doc/html/symtab | 313 - doc/html/version.gif | Bin 4772 -> 0 bytes doc/html/version.obj | 96 - doc/src/Copyright.html | 76 - doc/src/Glossary.html | 109 - doc/src/H5.intro.doc | Bin 325120 -> 0 bytes doc/src/RM_H5.html | 223 - doc/src/RM_H5A.html | 523 -- doc/src/RM_H5D.html | 431 - doc/src/RM_H5E.html | 367 - doc/src/RM_H5F.html | 334 - doc/src/RM_H5Front.html | 98 - doc/src/RM_H5G.html | 744 -- doc/src/RM_H5P.html | 1996 ---- doc/src/RM_H5R.html | 248 - doc/src/RM_H5S.html | 749 -- doc/src/RM_H5T.html | 1769 ---- doc/src/RM_H5Z.html | 129 - doc/src/Tools.html | 265 - doc/tgif/APIGrammar.obj | 216 - doc/tgif/FileGrammar.obj | 552 -- doc/tgif/IOPipe.obj | 715 -- doc/tgif/RobbPipe.obj | 136 - doc/tgif/UserView.obj | 1203 --- 402 files changed, 114737 deletions(-) delete mode 100644 doc/Makefile.am delete mode 100644 doc/Makefile.in delete mode 100644 doc/html/ADGuide.html delete mode 100755 doc/html/ADGuide/Changes.html delete mode 100755 doc/html/ADGuide/H4toH5Mapping.doc 
delete mode 100644 doc/html/ADGuide/H4toH5Mapping.pdf delete mode 100644 doc/html/ADGuide/HISTORY.txt delete mode 100755 doc/html/ADGuide/ImageSpec.html delete mode 100644 doc/html/ADGuide/Makefile.am delete mode 100644 doc/html/ADGuide/Makefile.in delete mode 100755 doc/html/ADGuide/PaletteExample1.gif delete mode 100755 doc/html/ADGuide/Palettes.fm.anc.gif delete mode 100644 doc/html/ADGuide/RELEASE.txt delete mode 100644 doc/html/Attributes.html delete mode 100644 doc/html/Big.html delete mode 100644 doc/html/Caching.html delete mode 100644 doc/html/Chunk_f1.gif delete mode 100644 doc/html/Chunk_f1.obj delete mode 100644 doc/html/Chunk_f2.gif delete mode 100644 doc/html/Chunk_f2.obj delete mode 100644 doc/html/Chunk_f3.gif delete mode 100644 doc/html/Chunk_f4.gif delete mode 100644 doc/html/Chunk_f5.gif delete mode 100644 doc/html/Chunk_f6.gif delete mode 100644 doc/html/Chunk_f6.obj delete mode 100644 doc/html/Chunking.html delete mode 100644 doc/html/CodeReview.html delete mode 100644 doc/html/Coding.html delete mode 100644 doc/html/Copyright.html delete mode 100644 doc/html/Datasets.html delete mode 100644 doc/html/Dataspaces.html delete mode 100644 doc/html/Datatypes.html delete mode 100644 doc/html/DatatypesEnum.html delete mode 100644 doc/html/Debugging.html delete mode 100644 doc/html/EnumMap.gif delete mode 100644 doc/html/Environment.html delete mode 100644 doc/html/Errors.html delete mode 100644 doc/html/ExternalFiles.html delete mode 100644 doc/html/FF-IH_FileGroup.gif delete mode 100644 doc/html/FF-IH_FileObject.gif delete mode 100644 doc/html/Files.html delete mode 100644 doc/html/Filters.html delete mode 100644 doc/html/Glossary.html delete mode 100755 doc/html/Graphics/C++.gif delete mode 100755 doc/html/Graphics/FORTRAN.gif delete mode 100755 doc/html/Graphics/Java.gif delete mode 100644 doc/html/Graphics/Makefile.am delete mode 100644 doc/html/Graphics/Makefile.in delete mode 100755 doc/html/Graphics/OtherAPIs.gif delete mode 100644 doc/html/Groups.html delete mode 100644 doc/html/H5.api_map.html delete mode 100644 doc/html/H5.format.html delete mode 100644 doc/html/H5.intro.html delete mode 100644 doc/html/H5.sample_code.html delete mode 100644 doc/html/H5.user.PrintGen.html delete mode 100644 doc/html/H5.user.PrintTpg.html delete mode 100644 doc/html/H5.user.html delete mode 100644 doc/html/IH_map1.gif delete mode 100644 doc/html/IH_map2.gif delete mode 100644 doc/html/IH_map3.gif delete mode 100644 doc/html/IH_map4.gif delete mode 100644 doc/html/IH_mapFoot.gif delete mode 100644 doc/html/IH_mapHead.gif delete mode 100644 doc/html/IOPipe.html delete mode 100644 doc/html/Intro/IntroExamples.html delete mode 100644 doc/html/Intro/Makefile.am delete mode 100644 doc/html/Intro/Makefile.in delete mode 100644 doc/html/Lib_Maint.html delete mode 100644 doc/html/Makefile.am delete mode 100644 doc/html/Makefile.in delete mode 100644 doc/html/MemoryManagement.html delete mode 100644 doc/html/MountingFiles.html delete mode 100644 doc/html/NCSAfooterlogo.gif delete mode 100644 doc/html/ObjectHeader.txt delete mode 100644 doc/html/PSandPDF/Makefile.am delete mode 100644 doc/html/PSandPDF/Makefile.in delete mode 100644 doc/html/PSandPDF/process.txt delete mode 100644 doc/html/Performance.html delete mode 100644 doc/html/PredefDTypes.html delete mode 100644 doc/html/Properties.html delete mode 100644 doc/html/RM_H5.html delete mode 100644 doc/html/RM_H5A.html delete mode 100644 doc/html/RM_H5D.html delete mode 100644 doc/html/RM_H5E.html delete mode 100644 doc/html/RM_H5F.html 
delete mode 100644 doc/html/RM_H5Front.html delete mode 100644 doc/html/RM_H5G.html delete mode 100644 doc/html/RM_H5I.html delete mode 100644 doc/html/RM_H5P.html delete mode 100644 doc/html/RM_H5R.html delete mode 100644 doc/html/RM_H5S.html delete mode 100644 doc/html/RM_H5T.html delete mode 100644 doc/html/RM_H5Z.html delete mode 100644 doc/html/References.html delete mode 100644 doc/html/TechNotes.html delete mode 100644 doc/html/TechNotes/Automake.html delete mode 100644 doc/html/TechNotes/Basic_perform.html delete mode 100644 doc/html/TechNotes/BigDataSmMach.html delete mode 100644 doc/html/TechNotes/ChStudy_1000x1000.gif delete mode 100644 doc/html/TechNotes/ChStudy_250x250.gif delete mode 100644 doc/html/TechNotes/ChStudy_499x499.gif delete mode 100644 doc/html/TechNotes/ChStudy_5000x1000.gif delete mode 100644 doc/html/TechNotes/ChStudy_500x500.gif delete mode 100644 doc/html/TechNotes/ChStudy_p1.gif delete mode 100644 doc/html/TechNotes/ChStudy_p1.obj delete mode 100644 doc/html/TechNotes/ChunkingStudy.html delete mode 100644 doc/html/TechNotes/CodeReview.html delete mode 100644 doc/html/TechNotes/Daily_Test_Explained.htm delete mode 100644 doc/html/TechNotes/DataTransformReport.htm delete mode 100644 doc/html/TechNotes/ExternalFiles.html delete mode 100644 doc/html/TechNotes/FreeLists.html delete mode 100644 doc/html/TechNotes/H4-H5Compat.html delete mode 100644 doc/html/TechNotes/HeapMgmt.html delete mode 100644 doc/html/TechNotes/IOPipe.html delete mode 100644 doc/html/TechNotes/LibMaint.html delete mode 100644 doc/html/TechNotes/Makefile.am delete mode 100644 doc/html/TechNotes/Makefile.in delete mode 100644 doc/html/TechNotes/MemoryMgmt.html delete mode 100644 doc/html/TechNotes/MoveDStruct.html delete mode 100644 doc/html/TechNotes/NamingScheme.html delete mode 100644 doc/html/TechNotes/ObjectHeader.html delete mode 100644 doc/html/TechNotes/RawDStorage.html delete mode 100644 doc/html/TechNotes/ReservedFileSpace.html delete mode 100755 doc/html/TechNotes/SWControls.html delete mode 100644 doc/html/TechNotes/SymbolTables.html delete mode 100644 doc/html/TechNotes/TestReview.html delete mode 100644 doc/html/TechNotes/TestReview/H5Dget_offset.html delete mode 100644 doc/html/TechNotes/TestReview/H5Tget_native_type.html delete mode 100644 doc/html/TechNotes/ThreadSafeLibrary.html delete mode 100644 doc/html/TechNotes/VFL.html delete mode 100644 doc/html/TechNotes/VFLfunc.html delete mode 100644 doc/html/TechNotes/VLTypes.html delete mode 100644 doc/html/TechNotes/Version.html delete mode 100644 doc/html/TechNotes/openmp-hdf5.c delete mode 100644 doc/html/TechNotes/openmp-hdf5.html delete mode 100644 doc/html/TechNotes/pipe1.gif delete mode 100644 doc/html/TechNotes/pipe1.obj delete mode 100644 doc/html/TechNotes/pipe2.gif delete mode 100644 doc/html/TechNotes/pipe2.obj delete mode 100644 doc/html/TechNotes/pipe3.gif delete mode 100644 doc/html/TechNotes/pipe3.obj delete mode 100644 doc/html/TechNotes/pipe4.gif delete mode 100644 doc/html/TechNotes/pipe4.obj delete mode 100644 doc/html/TechNotes/pipe5.gif delete mode 100644 doc/html/TechNotes/pipe5.obj delete mode 100755 doc/html/TechNotes/shuffling-algorithm-report.pdf delete mode 100644 doc/html/TechNotes/version.gif delete mode 100644 doc/html/TechNotes/version.obj delete mode 100644 doc/html/Tools.html delete mode 100644 doc/html/Tutor/Contents.html delete mode 100644 doc/html/Tutor/ContentsAdd.html delete mode 100644 doc/html/Tutor/ContentsAdv.html delete mode 100644 doc/html/Tutor/ContentsFull.html delete mode 100644 
doc/html/Tutor/ContentsIntro.html delete mode 100644 doc/html/Tutor/Copyright.html delete mode 100644 doc/html/Tutor/Graphics/AddInfo.gif delete mode 100644 doc/html/Tutor/Graphics/AdvTopics.gif delete mode 100644 doc/html/Tutor/Graphics/BLANK.gif delete mode 100644 doc/html/Tutor/Graphics/ChunkExt.gif delete mode 100644 doc/html/Tutor/Graphics/CompDTypes.gif delete mode 100644 doc/html/Tutor/Graphics/Copy.gif delete mode 100644 doc/html/Tutor/Graphics/CreateAttr.gif delete mode 100644 doc/html/Tutor/Graphics/CreateDset1.gif delete mode 100644 doc/html/Tutor/Graphics/CreateDset2.gif delete mode 100644 doc/html/Tutor/Graphics/CreateFile.gif delete mode 100644 doc/html/Tutor/Graphics/CreateGrp1.gif delete mode 100644 doc/html/Tutor/Graphics/CreateGrp2.gif delete mode 100644 doc/html/Tutor/Graphics/Examples.gif delete mode 100644 doc/html/Tutor/Graphics/FileOrg.gif delete mode 100644 doc/html/Tutor/Graphics/FullTOC1.gif delete mode 100644 doc/html/Tutor/Graphics/FullTOC2.gif delete mode 100644 doc/html/Tutor/Graphics/Glossary.gif delete mode 100644 doc/html/Tutor/Graphics/H5API.gif delete mode 100644 doc/html/Tutor/Graphics/Intro.gif delete mode 100644 doc/html/Tutor/Graphics/IntroTopics.gif delete mode 100644 doc/html/Tutor/Graphics/Iterate.gif delete mode 100644 doc/html/Tutor/Graphics/Makefile.am delete mode 100644 doc/html/Tutor/Graphics/Makefile.in delete mode 100644 doc/html/Tutor/Graphics/MountFile.gif delete mode 100644 doc/html/Tutor/Graphics/Quiz.gif delete mode 100644 doc/html/Tutor/Graphics/QuizAns.gif delete mode 100644 doc/html/Tutor/Graphics/RdWrDataset.gif delete mode 100755 doc/html/Tutor/Graphics/RefObject.gif delete mode 100755 doc/html/Tutor/Graphics/RefRegion.gif delete mode 100644 doc/html/Tutor/Graphics/References.gif delete mode 100644 doc/html/Tutor/Graphics/SelectElemCp.gif delete mode 100644 doc/html/Tutor/Graphics/SelectHyp.gif delete mode 100644 doc/html/Tutor/Graphics/TOC.gif delete mode 100644 doc/html/Tutor/Graphics/TOCFull.gif delete mode 100644 doc/html/Tutor/Graphics/TOCShort.gif delete mode 100644 doc/html/Tutor/Graphics/TitlePg.gif delete mode 100644 doc/html/Tutor/Graphics/Utilities.gif delete mode 100644 doc/html/Tutor/Makefile.am delete mode 100644 doc/html/Tutor/Makefile.in delete mode 100644 doc/html/Tutor/answers.html delete mode 100644 doc/html/Tutor/api.html delete mode 100644 doc/html/Tutor/bighdf2sp.JPG delete mode 100644 doc/html/Tutor/compound.html delete mode 100644 doc/html/Tutor/crtatt.html delete mode 100644 doc/html/Tutor/crtdat.html delete mode 100644 doc/html/Tutor/crtfile.html delete mode 100644 doc/html/Tutor/crtgrp.html delete mode 100644 doc/html/Tutor/crtgrpar.html delete mode 100644 doc/html/Tutor/crtgrpd.html delete mode 100644 doc/html/Tutor/examples/Makefile.am delete mode 100644 doc/html/Tutor/examples/Makefile.in delete mode 100644 doc/html/Tutor/examples/attrexample.f90 delete mode 100644 doc/html/Tutor/examples/chunk.f90 delete mode 100644 doc/html/Tutor/examples/compound.f90 delete mode 100644 doc/html/Tutor/examples/dsetexample.f90 delete mode 100644 doc/html/Tutor/examples/fileexample.f90 delete mode 100644 doc/html/Tutor/examples/groupexample.f90 delete mode 100644 doc/html/Tutor/examples/grpdsetexample.f90 delete mode 100644 doc/html/Tutor/examples/grpit.f90 delete mode 100644 doc/html/Tutor/examples/grpsexample.f90 delete mode 100644 doc/html/Tutor/examples/h5_compound.c delete mode 100644 doc/html/Tutor/examples/h5_copy.c delete mode 100644 doc/html/Tutor/examples/h5_crtatt.c delete mode 100644 
doc/html/Tutor/examples/h5_crtdat.c delete mode 100644 doc/html/Tutor/examples/h5_crtfile.c delete mode 100644 doc/html/Tutor/examples/h5_crtgrp.c delete mode 100644 doc/html/Tutor/examples/h5_crtgrpar.c delete mode 100644 doc/html/Tutor/examples/h5_crtgrpd.c delete mode 100644 doc/html/Tutor/examples/h5_extend.c delete mode 100644 doc/html/Tutor/examples/h5_hyperslab.c delete mode 100644 doc/html/Tutor/examples/h5_iterate.c delete mode 100644 doc/html/Tutor/examples/h5_mount.c delete mode 100644 doc/html/Tutor/examples/h5_rdwt.c delete mode 100644 doc/html/Tutor/examples/h5_read.c delete mode 100644 doc/html/Tutor/examples/h5_ref2objr.c delete mode 100644 doc/html/Tutor/examples/h5_ref2objw.c delete mode 100644 doc/html/Tutor/examples/h5_ref2regr.c delete mode 100644 doc/html/Tutor/examples/h5_ref2regw.c delete mode 100644 doc/html/Tutor/examples/h5_reference.c delete mode 100644 doc/html/Tutor/examples/hyperslab.f90 delete mode 100644 doc/html/Tutor/examples/java/Compound.java delete mode 100644 doc/html/Tutor/examples/java/Copy.java delete mode 100644 doc/html/Tutor/examples/java/CreateAttribute.java delete mode 100644 doc/html/Tutor/examples/java/CreateDataset.java delete mode 100644 doc/html/Tutor/examples/java/CreateFile.java delete mode 100644 doc/html/Tutor/examples/java/CreateFileInput.java delete mode 100644 doc/html/Tutor/examples/java/CreateGroup.java delete mode 100644 doc/html/Tutor/examples/java/CreateGroupAR.java delete mode 100644 doc/html/Tutor/examples/java/CreateGroupDataset.java delete mode 100644 doc/html/Tutor/examples/java/DatasetRdWt.java delete mode 100644 doc/html/Tutor/examples/java/Dependencies delete mode 100644 doc/html/Tutor/examples/java/HyperSlab.java delete mode 100644 doc/html/Tutor/examples/java/Makefile delete mode 100644 doc/html/Tutor/examples/java/Makefile.in delete mode 100644 doc/html/Tutor/examples/java/README delete mode 100644 doc/html/Tutor/examples/java/readme.html delete mode 100644 doc/html/Tutor/examples/java/runCompound.sh delete mode 100644 doc/html/Tutor/examples/java/runCompound.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCopy.sh delete mode 100644 doc/html/Tutor/examples/java/runCopy.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCreateAttribute.sh delete mode 100644 doc/html/Tutor/examples/java/runCreateAttribute.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCreateDataset.sh delete mode 100644 doc/html/Tutor/examples/java/runCreateDataset.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCreateFile.sh delete mode 100644 doc/html/Tutor/examples/java/runCreateFile.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCreateFileInput.sh delete mode 100644 doc/html/Tutor/examples/java/runCreateFileInput.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCreateGroup.sh delete mode 100644 doc/html/Tutor/examples/java/runCreateGroup.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCreateGroupAR.sh delete mode 100644 doc/html/Tutor/examples/java/runCreateGroupAR.sh.in delete mode 100644 doc/html/Tutor/examples/java/runCreateGroupDataset.sh delete mode 100644 doc/html/Tutor/examples/java/runCreateGroupDataset.sh.in delete mode 100644 doc/html/Tutor/examples/java/runDatasetRdWt.sh delete mode 100644 doc/html/Tutor/examples/java/runDatasetRdWt.sh.in delete mode 100644 doc/html/Tutor/examples/java/runHyperSlab.sh delete mode 100644 doc/html/Tutor/examples/java/runHyperSlab.sh.in delete mode 100644 doc/html/Tutor/examples/mountexample.f90 delete mode 100644 
doc/html/Tutor/examples/refobjexample.f90 delete mode 100644 doc/html/Tutor/examples/refregexample.f90 delete mode 100644 doc/html/Tutor/examples/rwdsetexample.f90 delete mode 100644 doc/html/Tutor/examples/selectele.f90 delete mode 100644 doc/html/Tutor/extend.html delete mode 100644 doc/html/Tutor/fileorg.html delete mode 100644 doc/html/Tutor/footer-ncsalogo.gif delete mode 100644 doc/html/Tutor/glossary.html delete mode 100644 doc/html/Tutor/img001.gif delete mode 100644 doc/html/Tutor/img002.gif delete mode 100644 doc/html/Tutor/img003.gif delete mode 100644 doc/html/Tutor/img004.gif delete mode 100644 doc/html/Tutor/img005.gif delete mode 100644 doc/html/Tutor/index.html delete mode 100644 doc/html/Tutor/intro.html delete mode 100644 doc/html/Tutor/iterate.html delete mode 100644 doc/html/Tutor/mount.html delete mode 100644 doc/html/Tutor/property.html delete mode 100644 doc/html/Tutor/questions.html delete mode 100644 doc/html/Tutor/rdwt.html delete mode 100644 doc/html/Tutor/references.html delete mode 100644 doc/html/Tutor/reftoobj.html delete mode 100644 doc/html/Tutor/reftoreg.html delete mode 100644 doc/html/Tutor/select.html delete mode 100644 doc/html/Tutor/selectc.html delete mode 100644 doc/html/Tutor/software.html delete mode 100644 doc/html/Tutor/title.html delete mode 100644 doc/html/Tutor/util.html delete mode 100644 doc/html/Version.html delete mode 100644 doc/html/chunk1.gif delete mode 100644 doc/html/chunk1.obj delete mode 100644 doc/html/compat.html delete mode 100644 doc/html/cpplus/CppInterfaces.html delete mode 100644 doc/html/cpplus/CppUserNotes.doc delete mode 100644 doc/html/cpplus/CppUserNotes.pdf delete mode 100644 doc/html/cpplus/Makefile.am delete mode 100644 doc/html/cpplus/Makefile.in delete mode 100644 doc/html/dataset_p1.gif delete mode 100644 doc/html/dataset_p1.obj delete mode 100644 doc/html/ddl.html delete mode 100644 doc/html/ed_libs/Footer.lbi delete mode 100644 doc/html/ed_libs/Makefile.am delete mode 100644 doc/html/ed_libs/Makefile.in delete mode 100644 doc/html/ed_libs/NavBar_ADevG.lbi delete mode 100644 doc/html/ed_libs/NavBar_Common.lbi delete mode 100644 doc/html/ed_libs/NavBar_Intro.lbi delete mode 100644 doc/html/ed_libs/NavBar_RM.lbi delete mode 100644 doc/html/ed_libs/NavBar_TechN.lbi delete mode 100644 doc/html/ed_libs/NavBar_UG.lbi delete mode 100644 doc/html/ed_libs/styles_Format.lbi delete mode 100644 doc/html/ed_libs/styles_Gen.lbi delete mode 100644 doc/html/ed_libs/styles_Index.lbi delete mode 100644 doc/html/ed_libs/styles_Intro.lbi delete mode 100644 doc/html/ed_libs/styles_RM.lbi delete mode 100644 doc/html/ed_libs/styles_UG.lbi delete mode 100644 doc/html/ed_styles/FormatElect.css delete mode 100644 doc/html/ed_styles/FormatPrint.css delete mode 100644 doc/html/ed_styles/GenElect.css delete mode 100644 doc/html/ed_styles/GenPrint.css delete mode 100644 doc/html/ed_styles/IndexElect.css delete mode 100644 doc/html/ed_styles/IndexPrint.css delete mode 100644 doc/html/ed_styles/IntroElect.css delete mode 100644 doc/html/ed_styles/IntroPrint.css delete mode 100644 doc/html/ed_styles/Makefile.am delete mode 100644 doc/html/ed_styles/Makefile.in delete mode 100644 doc/html/ed_styles/RMelect.css delete mode 100644 doc/html/ed_styles/RMprint.css delete mode 100644 doc/html/ed_styles/UGelect.css delete mode 100644 doc/html/ed_styles/UGprint.css delete mode 100644 doc/html/extern1.gif delete mode 100644 doc/html/extern1.obj delete mode 100644 doc/html/extern2.gif delete mode 100644 doc/html/extern2.obj delete mode 100644 
doc/html/fortran/F90Flags.html delete mode 100644 doc/html/fortran/F90UserNotes.html delete mode 100644 doc/html/fortran/Makefile.am delete mode 100644 doc/html/fortran/Makefile.in delete mode 100644 doc/html/group_p1.gif delete mode 100644 doc/html/group_p1.obj delete mode 100644 doc/html/group_p2.gif delete mode 100644 doc/html/group_p2.obj delete mode 100644 doc/html/group_p3.gif delete mode 100644 doc/html/group_p3.obj delete mode 100644 doc/html/h5s.examples delete mode 100644 doc/html/hdf2.jpg delete mode 100644 doc/html/heap.txt delete mode 100644 doc/html/index.html delete mode 100644 doc/html/move.html delete mode 100644 doc/html/ph5design.html delete mode 100644 doc/html/ph5example.c delete mode 100644 doc/html/ph5implement.txt delete mode 100644 doc/html/pipe1.gif delete mode 100644 doc/html/pipe1.obj delete mode 100644 doc/html/pipe2.gif delete mode 100644 doc/html/pipe2.obj delete mode 100644 doc/html/pipe3.gif delete mode 100644 doc/html/pipe3.obj delete mode 100644 doc/html/pipe4.gif delete mode 100644 doc/html/pipe4.obj delete mode 100644 doc/html/pipe5.gif delete mode 100644 doc/html/pipe5.obj delete mode 100644 doc/html/review1.html delete mode 100644 doc/html/review1a.html delete mode 100644 doc/html/storage.html delete mode 100644 doc/html/symtab delete mode 100644 doc/html/version.gif delete mode 100644 doc/html/version.obj delete mode 100644 doc/src/Copyright.html delete mode 100755 doc/src/Glossary.html delete mode 100755 doc/src/H5.intro.doc delete mode 100755 doc/src/RM_H5.html delete mode 100644 doc/src/RM_H5A.html delete mode 100644 doc/src/RM_H5D.html delete mode 100644 doc/src/RM_H5E.html delete mode 100644 doc/src/RM_H5F.html delete mode 100644 doc/src/RM_H5Front.html delete mode 100644 doc/src/RM_H5G.html delete mode 100644 doc/src/RM_H5P.html delete mode 100644 doc/src/RM_H5R.html delete mode 100644 doc/src/RM_H5S.html delete mode 100644 doc/src/RM_H5T.html delete mode 100644 doc/src/RM_H5Z.html delete mode 100644 doc/src/Tools.html delete mode 100644 doc/tgif/APIGrammar.obj delete mode 100644 doc/tgif/FileGrammar.obj delete mode 100644 doc/tgif/IOPipe.obj delete mode 100644 doc/tgif/RobbPipe.obj delete mode 100644 doc/tgif/UserView.obj diff --git a/doc/Makefile.am b/doc/Makefile.am deleted file mode 100644 index 722ad03..0000000 --- a/doc/Makefile.am +++ /dev/null @@ -1,23 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# -# This is the top level makefile of the Doc directory. It mostly just -# reinvokes make in the various subdirectories. -# You can alternatively invoke make from each subdirectory manually. -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -# Add doc-specific include -include $(top_srcdir)/config/commence-doc.am - -# Subdirectories in build-order -SUBDIRS=html - -# Don't include conclude, since the docs directory doesn't need to know how to -# build anything. diff --git a/doc/Makefile.in b/doc/Makefile.in deleted file mode 100644 index 981cf79..0000000 --- a/doc/Makefile.in +++ /dev/null @@ -1,619 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# -# This is the top level makefile of the Doc directory. It mostly just -# reinvokes make in the various subdirectories. -# You can alternatively invoke make from each subdirectory manually. -# -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = .. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-exec-recursive install-info-recursive \ - install-recursive installcheck-recursive installdirs-recursive \ - pdf-recursive ps-recursive uninstall-info-recursive \ - uninstall-recursive -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. 
-MOSTLYCLEANFILES = *.chkexe *.chksh - -# Add doc-specific include - -# Subdirectories in build-order -SUBDIRS = html -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. -# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. 
-$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -mostlyclean-recursive clean-recursive distclean-recursive \ -maintainer-clean-recursive: - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(mkdir_p) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile -installdirs: installdirs-recursive -installdirs-am: -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require 
special tools to rebuild." -clean: clean-recursive - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool \ - distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: - -install-exec-am: - -install-info: install-info-recursive - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-info-am - -uninstall-info: uninstall-info-recursive - -.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \ - clean clean-generic clean-libtool clean-recursive ctags \ - ctags-recursive distclean distclean-generic distclean-libtool \ - distclean-recursive distclean-tags distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-exec install-exec-am install-info \ - install-info-am install-man install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic maintainer-clean-recursive \ - mostlyclean mostlyclean-generic mostlyclean-libtool \ - mostlyclean-recursive pdf pdf-am ps ps-am tags tags-recursive \ - uninstall uninstall-am uninstall-info-am - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall - -# Don't include conclude, since the docs directory doesn't need to know how to -# build anything. -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/ADGuide.html b/doc/html/ADGuide.html deleted file mode 100644 index 5ac4d71..0000000 --- a/doc/html/ADGuide.html +++ /dev/null @@ -1,266 +0,0 @@ - - - - HDF5 Application Developer's Guide - - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
-
-

HDF5 Application Developer's Guide

-
- - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- These documents provide information of particular interest to - developers of applications that employ the HDF5 library. -

-

 
- HDF5 Library Changes -
    - from Release to Release -
A summary of changes in the HDF5 - library -
 
- Supported Configuration -
    - Features Summary -
A summary of configuration features - supported in this release -    - (external link) -
 
- HDF5 Image and -
    - Palette Specification -
A specification for the implementation - of images and palettes in HDF5 applications -
 
- Mapping HDF4 Objects -
    - to HDF5 Objects -
Guidelines for translating - HDF4 file objects into valid HDF5 file objects   - (PDF format only) -
 
- Fill Value and Space -
    - Allocation Issues -
A summary of HDF5 fill value and storage allocation issues   - (external link) -
- Fill Value and Space -
    - Allocation Behavior -
A table summarizing the behavioral interactions - of HDF5 fill value and storage allocation settings   - (external link) -
 
- SZIP Compression -
    - - in HDF5 -
A description of SZIP compression in HDF5, - H5Pset_szip, terms of use and copyright notice, - and references   - (external link) -
 
- Shuffle Performance - An analysis of bzip and gzip compression - performance in HDF5 with and without the shuffle filter, - H5Pset_shuffle   - (external link) -
 
- Generic Properties - An overview of and the motivation for - the implementation and use of generic properties in HDF5   - (external link) -
 
- Error-detecting Codes -
    - - for HDF5 -
A discussion of error-detection codes, - e.g., checksums, in HDF5   - (external link) -
- Fletcher32 Checksum -
    - - Design and Spec -
Design, API function specification, and test - for the Fletcher32 checksum implementation in HDF5   - (external link) -
 
-
- The HDF5 source code, as distributed to users and developers, - contains two additional files that will be of interest to readers - of this document. Both files are located at the top level of the - HDF5 source code tree and are duplicated here for your reference: -
- RELEASE.txt - - Technical notes regarding this release -
- HISTORY.txt - - A release-by-release history of the HDF5 library -
-
- -
-
- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
-
- - - -
- -
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- -Last modified: 3 July 2003 - -
-Copyright   -
- - - diff --git a/doc/html/ADGuide/Changes.html b/doc/html/ADGuide/Changes.html deleted file mode 100755 index 813da28..0000000 --- a/doc/html/ADGuide/Changes.html +++ /dev/null @@ -1,1086 +0,0 @@ - - - HDF5 Software Changes - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
-
-

HDF5 Software Changes from Release to Release

-
- - - -This document is intended to assist application developers who must keep -an application synchronized with the HDF5 library or related software. - - -

Release 1.7.x (current release) versus Release 1.6

- - - -

Deleted Functions

- The following functions have been removed from HDF5 Release 1.7.x because the GASS - virtual file driver has been retired. - - - - -
-         - -
-H5Pget_fapl_gass
-
-
-        - -
-H5Pset_fapl_gass
-
-
-        - -
-
-
-

  - -

Release 1.6.0 (current release) versus Release 1.4.5

- - - -This section lists the API-level changes that have been made in the -transition from the HDF5 Release 1.4.x series to Release 1.6.0. - - -

New Functions and Tools

-
-
The following functions are new for Release 1.6.0 and are included in the - HDF5 Reference Manual. - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
H5Dget_offsethsize_t H5Dget_offset (hid_t dset_id)
H5Dget_space_statushid_t H5Dget_space_status (hid_t - dset_id, H5D_space_status_t *status) -
H5Fget_obj_ids - int H5Fget_obj_ids (hid_t file_id, - unsigned int types, - int max_objs, hid_t *obj_id_list) -
H5Fget_vfd_handle - herr_t H5Fget_vfd_handle (hid_t file_id, - hid_t fapl_id, void *file_handle) -
H5Gget_num_objs - herr_t H5Gget_num_objs (hid_t loc_id, - hsize_t* num_obj) -
H5Gget_objname_by_idx - ssize_t H5Gget_objname_by_idx (hid_t group_id, - hsize_t idx, char *name, - size_t* size) -
H5Gget_objtype_by_idx - int H5Gget_objtype_by_idx (hid_t group_id, - hsize_t idx) -
H5Iget_name - ssize_t H5Iget_name (hid_t obj_id, - char *name, size_t size) -
H5Pall_filters_avail - htri_t H5Pall_filters_avail (hid_t dcpl_id) -
H5Pfill_value_defined - herr_t H5Pfill_value_defined (hid_t plist_id, - H5D_fill_value_t *status) -
H5Pget_alloc_time - herr_t H5Pget_alloc_time (hid_t plist_id, - H5D_alloc_time_t *alloc_time) -
H5Pget_edc_check - H5Z_EDC_t H5Pget_edc_check (hid_t - plist) -
H5Pget_family_offset - herr_t H5Pget_family_offset (hid_t fapl_id, - hsize_t *offset) -
H5Pget_fapl_mpiposix - herr_t H5Pget_fapl_mpiposix (hid_t fapl_id, - MPI_Comm *comm) -
H5Pget_fill_time - herr_t H5Pget_fill_time (hid_t plist_id, - H5D_fill_time_t *fill_time) -
H5Pget_filter_by_id - herr_t H5Pget_filter_by_id (hid_t plist_id, - H5Z_filter_t filter, unsigned int *flags, - size_t *cd_nelmts, unsigned int cd_values[], - size_t namelen, char *name[]) -
H5Pget_hyper_vector_size - herr_t H5Pget_hyper_vector_size (hid_t dxpl_id, - size_t *vector_size) -
H5Pget_multi_type - herr_t H5Pget_multi_type (hid_t fapl_id, - H5FD_mem_t *type) -
H5Pmodify_filter - herr_t H5Pmodify_filter (hid_t plist, - H5Z_filter_t filter, unsigned int flags, - size_t cd_nelmts, const unsigned int cd_values[]) -
H5Pset_alloc_time - herr_t H5Pset_alloc_time (hid_t - plist_id, H5D_alloc_time_t alloc_time) -
H5Pset_edc_check - herr_t H5Pset_edc_check (hid_t - plist, H5Z_EDC_t check) -
H5Pset_family_offset - herr_t H5Pset_family_offset (hid_t fapl_id, - hsize_t offset) -
H5Pset_fapl_mpiposix - herr_t H5Pset_fapl_mpiposix (hid_t fapl_id, - MPI_Comm comm) -
H5Pset_fill_time - herr_t H5Pset_fill_time (hid_t plist_id, - H5D_fill_time_t fill_time) -
H5Pset_filter - herr_t H5Pset_filter - (hid_t plist, H5Z_filter_t filter, - unsigned int flags, size_t cd_nelmts, - const unsigned int cd_values[]) -
H5Pset_filter_callback - herr_t H5Pset_filter_callback (hid_t - plist, H5Z_filter_func_t func, - void *op_data) -
H5Pset_fletcher32 - herr_t H5Pset_fletcher32 (hid_t - plist) -
H5Pset_hyper_vector_size  - herr_t H5Pset_hyper_vector_size (hid_t dxpl_id, - size_t vector_size) -
H5Pset_multi_type - herr_t H5Pset_multi_type (hid_t fapl_id, - H5FD_mem_t type) -
H5Pset_shuffle - herr_t H5Pset_shuffle (hid_t plist_id) -
H5Pset_szip - herr_t H5Pset_szip (hid_t plist, - unsigned int options_mask, unsigned int - pixels_per_block) -
H5Rget_object_type - int H5Rget_object_type (hid_t id, - void *ref) -
H5set_free_list_limits - herr_t H5set_free_list_limits (int reg_global_lim, - int reg_list_lim, int arr_global_lim, - int arr_list_lim, int blk_global_lim, - int blk_list_lim) -
H5Sget_select_type - H5S_sel_type H5Sget_select_type (hid_t space_id) -
H5Tdetect_class - htri_t H5Tdetect_class (hid_t dtype_id, - H5T_class_t dtype_class) -
H5Tget_native_type - hid_t H5Tget_native_type (hid_t type_id, - H5T_direction_t direction) -
H5Tis_variable_str - htri_t H5Tis_variable_str (hid_t dtype_id) -
H5Zfilter_avail - herr_t H5Zfilter_avail (H5Z_filter_t filter) -
H5Zunregister - herr_t H5Zunregister (H5Z_filter_t filter) -
-
-
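-
- As an illustration only (this sketch is not part of the original release notes;
- error checking is omitted), several of the new Release 1.6.0 routines can be
- combined on a single dataset creation property list. The shuffle and Fletcher32
- filters assume a chunked layout:
-
 #include "hdf5.h"

 /* Illustrative sketch: enable two of the new 1.6.0 filters on a
  * chunked dataset creation property list.                        */
 hid_t make_filtered_dcpl(void)
 {
     hid_t   dcpl     = H5Pcreate(H5P_DATASET_CREATE);
     hsize_t chunk[1] = {1024};

     H5Pset_chunk(dcpl, 1, chunk);   /* filters require chunked layout */
     H5Pset_shuffle(dcpl);           /* new in 1.6.0                    */
     H5Pset_fletcher32(dcpl);        /* new in 1.6.0                    */
     return dcpl;
 }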
 
- - -
The following tools are new for Release 1.6.0 and are included in the - HDF5 Reference Manual. -
h5diff -
h5import -
h5fc -
h5c++ -
h5perf -
h5redeploy - -
- - -

Deleted Functions

- The following functions are deprecated in HDF5 Release 1.6.0. - A backward compatibility mode is provided in this release, - enabling these functions and other Release 1.4.x compatibility - features, but is available only when the HDF5 library is - configured with the flag H5_WANT_H5_V1_4_COMPAT. - The backward compatibility mode is not enabled in the - binaries distributed by NCSA. - - - - -
-         - -
-H5Pset_hyper_cache
-H5Pget_hyper_cache
-
-
-        - -
-H5Rget_object_type
-
-
-        - -
 
-
-
-
-
-

- The above functions will eventually be removed from the HDF5 - distribution and from the HDF5 Reference Manual. - - -

Functions with Changed Syntax

- The following functions have changed as noted. - -
-
C functions: -
H5FDflush and VFL "flush" callbacks -
An extra parameter closing has been added to - these functions, - to allow the library to indicate that the file will be closed - following the call to "flush". Actions in the "flush" call - that are duplicated in the VFL "close" call may be omitted by - the VFL driver. -
* H5Gget_objtype_by_idx -
The function return type has changed from - int to - the enumerated type H5G_obj_t. -
* H5Pset(get)_buffer -
The size parameter for H5Pset_buffer - has changed from type hsize_t to - size_t. -
The H5Pget_buffer return type has similarly - changed from hsize_t to - size_t. -
* H5Pset(get)_cache -
The rdcc_nelmts parameter has changed from type - int to - size_t. -
* H5Pset_fapl_log -
The verbosity parameter has been removed. -
Two new parameters have been added: - flags of type unsigned and - buf_size of type size_t. -
* H5Pset(get)_fapl_mpiposix -
A use_gpfs parameter of type - hbool_t has been added. -
* H5Pset(get)_sieve_buf_size -
The size parameter has changed from type - hsize_t to - size_t. -
* H5Pset(get)_sym_k -
The lk parameter has changed from type - int to - unsigned. -
* H5Sget_select_bounds -
The start and end parameters have - changed from type hsize_t * - to hssize_t * to better match the - rest of the dataspace API. -
* H5Zregister -
This function is substantially revised in Release 1.6.0 with - a new H5Z_class_t struct and - new "set local" and "can apply" callback functions. -
  -
Fortran90 functions: -
h5pset(get)_fapl_core_f -
The backing_store parameter has changed from - INTEGER to LOGICAL - to better match the C API. -
h5pset(get)_preserve_f -
The flag parameter has changed from - INTEGER to LOGICAL - to better match the C API. -
-
- -

- Backward compatibility with the Release 1.4.x syntax is available - for the functions indicated above with a leading asterisk (*). - These backward compatibility features are available only when the - HDF5 library is configured with the flag - H5_WANT_H5_V1_4_COMPAT; they are not enabled in the binaries distributed by NCSA and - will eventually be removed from the HDF5 distribution. - -

Constants with Changed Values

- - -
-

  - - - - - - -

Release 1.4.5 versus Release 1.4.4

- -

C Library

- - - Added functions: - - - -
-
-herr_t H5Pset_fapl_mpiposix(hid_t fapl_id, MPI_Comm comm);
-herr_t H5Pget_fapl_mpiposix(hid_t fapl_id, MPI_Comm *comm/*out*/);
-
-
-
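-
- As a usage sketch only (not taken from the original text; error checking is
- omitted and the file name is illustrative), the new driver would typically be
- attached to a file access property list before the file is created or opened,
- using the two-argument prototype listed above:
-
 #include <mpi.h>
 #include "hdf5.h"

 /* Sketch: create a file through the new MPI-POSIX driver. */
 hid_t create_mpiposix_file(const char *name)
 {
     hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
     hid_t file_id = -1;

     if (H5Pset_fapl_mpiposix(fapl_id, MPI_COMM_WORLD) >= 0)
         file_id = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);

     H5Pclose(fapl_id);
     return file_id;
 }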
- - Changed functions: - - The following functions have changed behavior. -

- - -
- H5Pset_fapl_mpio  
- H5Pget_fapl_mpio
- H5Fcreate
- H5Fopen
- H5Fclose -
- Previously, the communicator and Info object arguments supplied - to H5Pset_fapl_mpio were stored in the property list - as handle values. - This meant that changes made to the communicator or the Info object - after calling H5Pset_fapl_mpio would affect how - the property list functioned. - The same was true of H5Fopen and H5Fcreate, - which also stored only the handle values. This does not conform to the - MPI-2 specification of how Info objects should be handled - (MPI-2 requires that Info objects be parsed at the time of the call.)
- H5Pset_fapl_mpio now stores a duplicate of each of - the communicator and Info object.
- H5Pget_fapl_mpio now returns a duplicate of its - stored communicator and Info object. - It is now the responsibility of the applications to free - those objects when done.

- H5Fcreate and H5Fopen also store - a duplicate of the communicator and Info - object supplied by the file access property List. - H5Fclose frees the duplicates.

- Advice to users: User applications should release the communicator and - Info object returned by H5Pget_fapl_mpio when they are - no longer needed. -
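-
- A minimal sketch of the advice above (not part of the original document; error
- handling is omitted): the duplicates returned by H5Pget_fapl_mpio are owned by
- the application and should be freed with the usual MPI cleanup routines.
-
 #include <mpi.h>
 #include "hdf5.h"

 /* Sketch: retrieve the duplicated communicator and Info object
  * from a file access property list and release them when done. */
 void release_mpio_duplicates(hid_t fapl_id)
 {
     MPI_Comm comm;
     MPI_Info info;

     if (H5Pget_fapl_mpio(fapl_id, &comm, &info) < 0)
         return;

     if (comm != MPI_COMM_NULL)
         MPI_Comm_free(&comm);
     if (info != MPI_INFO_NULL)
         MPI_Info_free(&info);
 }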

-

- - - - Removed functions: - - - -
-None - - -        - -
-          
-
-
-
- - There were no other public API changes in the C library for this release. - -
-

Fortran90 Library

- - - - The following missing Fortran functions were added: -
-
- h5get_libversion_f, h5check_version_f, h5garbage_collect_f, h5dont_atexit_f -
- h5tget_member_index_f, h5tvlen_create_f -
- h5dget_storage_size_f, h5dvlen_get_max_len_f , h5dwrite_vl_f, h5dread_vl_f -
-
- Only integer, real and - character types are supported for VL datatypes. - -
-
- - - - -

Release 1.4.4 versus Release 1.4.3

- -

C Library

- - - Added functions: - - - -
-
-H5Pget_small_data_block_size
-H5Pset_small_data_block_size
-H5Tget_member_index
-
-
-        - -
-          
-
-
-
- - Changed functions: - - - -
-None - - -        - -
-          
-
-
-
- - Removed functions: - - - -
-None - - -        - -
-          
-
-
-
- - There were no other public API changes in the C library for this release. - -
-

Fortran90 Library

- - - h5dwrite_f, h5dread_f, h5awrite_f, and h5aread_f were overloaded - so that the dims argument is an assumed-size array of type INTEGER(HSIZE_T). - We recommend using the subroutines with the new type. Module subroutines - that accept dims as an INTEGER array of size 7 will be deprecated in the 1.6.0 release. - -
- -

Release 1.4.3 versus Release 1.4.2

- -

C Library

- - - Removed function: - - - -
-
-H5Pset_fapl_dpss 
-
-        - -
-          
-
-
-        - -
-          
-
-
-
- - There were no other public API changes in the C library for this release. - -
-

Fortran90 Library

- - - There were no changes to the public Fortran90 APIs for this release. - - -
- -

Release 1.4.2 versus Release 1.4.1

- -

C Library

- The HDF5 Release 1.4.2 C library is a "Bugfix Release"; - there are no API changes in the underlying HDF5 library. - -

Fortran90 Library

- The following functions in the HDF5 Release 1.4.2 Fortran90 library - have an additional parameter, dims, that was not present - in Release 1.4.1: - - -
-h5aread_f(attr_id, memtype_id,  buf, dims, hdferr)
-h5awrite_f(attr_id, memtype_id,  buf, dims, hdferr)
-h5dread_f(dset_id, mem_type_id, buf, dims, hdferr, mem_space_id, &
-           file_space_id, xfer_prp)
-h5dwrite_f(dset_id, mem_type_id, buf, dims, hdferr, mem_space_id, &
-           file_space_id, xfer_prp)
-
-
- - The dims parameter enables library portability - between the UNIX and Microsoft Windows platforms. - -
- - -

Release 1.4.1 versus Release 1.4.0

- - Release 1.4.1 is a "Tools Release"; there are no API changes in the - underlying HDF5 library. - - - -

Release 1.4.0 versus Release 1.2.2

- - - - Several functions were added to or removed from the HDF5 library - in the development of Release 1.4.0. - A few functions have been modified in minor ways. - -

New Functions

- The following functions are new for Release 1.4.0 and are included in the - HDF5 Reference Manual. - - -
-herr_t H5Dvlen_get_buf_size (hid_t dataset_id, hid_t type_id, 
-           hid_t space_id, hsize_t *size);
-herr_t H5Epush (const char *file, const char *func,
-           unsigned line, H5E_major_t maj, H5E_minor_t min, 
-           const char *str);
-hid_t H5Pget_driver (hid_t plist_id);
-void *H5Pget_driver_info (hid_t plist_id);
-herr_t H5Pget_dxpl_mpio (hid_t dxpl_id, 
-           H5FD_mpio_xfer_t *xfer_mode/*out*/);
-herr_t H5Pget_dxpl_multi (hid_t dxpl_id, 
-           hid_t *memb_dxpl/*out*/);
-herr_t H5Pget_fapl_core (hid_t fapl_id, size_t *increment/*out*/,
-           hbool_t *backing_store/*out*/) 
-herr_t H5Pget_fapl_family (hid_t fapl_id, 
-           hsize_t *memb_size/*out*/, hid_t *memb_fapl_id/*out*/);
-herr_t H5Pget_fapl_mpio (hid_t fapl_id, MPI_Comm *comm/*out*/,
-           MPI_Info *info/*out*/);
-herr_t H5Pget_fapl_multi (hid_t fapl_id, 
-           H5FD_mem_t *memb_map/*out*/, hid_t *memb_fapl/*out*/, 
-           char **memb_name/*out*/, haddr_t *memb_addr/*out*/, 
-           hbool_t *relax/*out*/);
-herr_t H5Pget_fapl_stream (hid_t fapl_id,
-           H5FD_stream_fapl_t *fapl /*out*/ );
-herr_t H5Pget_meta_block_size (hid_t fapl_id, 
-           hsize_t *size/*out*/);
-herr_t H5Pget_sieve_buf_size (hid_t fapl_id, 
-           hsize_t *size/*out*/);
-herr_t H5Pset_driver (hid_t plist_id, hid_t driver_id,
-           const void *driver_info);
-herr_t H5Pset_dxpl_mpio (hid_t dxpl_id, 
-           H5FD_mpio_xfer_t xfer_mode);
-herr_t H5Pset_dxpl_multi (hid_t dxpl_id, 
-           const hid_t *memb_dxpl);
-herr_t H5Pset_fapl_core (hid_t fapl_id, size_t increment, 
-           hbool_t backing_store)
-herr_t H5Pset_fapl_family (hid_t fapl_id, hsize_t memb_size,
-           hid_t memb_fapl_id);
-herr_t H5Pset_fapl_log (hid_t fapl_id, char *logfile, 
-           int verbosity);
-herr_t H5Pset_fapl_mpio (hid_t fapl_id, MPI_Comm comm, 
-           MPI_Info info);
-herr_t H5Pset_fapl_multi (hid_t fapl_id, 
-           const H5FD_mem_t *memb_map, const hid_t *memb_fapl, 
-           const char **memb_name, const haddr_t *memb_addr, 
-           hbool_t relax);
-herr_t H5Pset_fapl_sec2 (hid_t fapl_id);
-herr_t H5Pset_fapl_split (hid_t fapl, const char *meta_ext,
-           hid_t meta_plist_id, const char *raw_ext,
-           hid_t raw_plist_id);
-herr_t H5Pset_fapl_stdio (hid_t fapl_id);
-herr_t H5Pset_fapl_stream (hid_t fapl_id,
-           H5FD_stream_fapl_t *fapl);
-herr_t H5Pset_meta_block_size(hid_t fapl_id, hsize_t size);
-herr_t H5Pset_sieve_buf_size(hid_t fapl_id, hsize_t size);
-hid_t H5Tarray_create (hid_t base, int rank, const hsize_t dims[], 
-           const int perm[])
-int H5Tget_array_dims (hid_t adtype_id, hsize_t *dims[], int *perm[])
-int H5Tget_array_ndims (hid_t adtype_id)
-
-
- - -

- The following functions are new for Release 1.4.0, but - are intended only for use in specialized environments. - These are also included in the - HDF5 Reference Manual. - -

-
-herr_t H5Pget_fapl_dpss (hid_t fapl_id);
-herr_t H5Pget_fapl_gass (hid_t fapl_id, GASS_Info *info/*out*/);
-herr_t H5Pget_fapl_srb (hid_t fapl_id, SRB_Info *info);
-herr_t H5Pset_fapl_dpss (hid_t fapl_id);
-herr_t H5Pset_fapl_gass (hid_t fapl_id, GASS_Info info);
-herr_t H5Pset_fapl_srb (hid_t fapl_id, SRB_Info info);
-
-
- - -

- The following functions are new for Release 1.4.0 but are intended - only for driver development work, not for general use. - They are listed in the - List of VFL Functions - document in the - HDF5 Technical Notes. - They are described in detail only in the source code and - do not appear in the HDF5 Reference Manual. - -

-
-haddr_t H5FDalloc (H5FD_t *file, H5FD_mem_t type, 
-          hsize_t size);
-herr_t H5FDclose (H5FD_t *file);
-int H5FDcmp (const H5FD_t *f1, const H5FD_t *f2);
-herr_t H5FDflush (H5FD_t *file);
-herr_t H5FDfree (H5FD_t *file, H5FD_mem_t type, 
-          haddr_t addr, hsize_t size);
-haddr_t H5FDget_eoa (H5FD_t *file);
-haddr_t H5FDget_eof (H5FD_t *file);
-H5FD_t *H5FDopen (const char *name, unsigned flags, 
-          hid_t fapl_id, haddr_t maxaddr);
-int H5FDquery (const H5FD_t *f, unsigned long *flags);
-herr_t H5FDread (H5FD_t *file, hid_t dxpl_id, haddr_t addr, 
-          hsize_t size, void *buf/*out*/);
-haddr_t H5FDrealloc (H5FD_t *file, H5FD_mem_t type, 
-          haddr_t addr, hsize_t old_size, hsize_t new_size);
-hid_t H5FDregister (const H5FD_class_t *cls);
-herr_t H5FDset_eoa (H5FD_t *file, haddr_t eof);
-herr_t H5FDunregister (hid_t driver_id);
-herr_t H5FDwrite (H5FD_t *file, H5FD_mem_t type, 
-          hid_t dxpl_id, haddr_t addr, hsize_t size, 
-          const void *buf);
-
-
- - -

Deleted Functions

- The following functions have been removed from the HDF5 library - and from the HDF5 Reference Manual. - - - -
-
-H5Pget_core
-H5Pget_driver
-H5Pget_family
-H5Pget_mpi
-H5Pget_sec2
-H5Pget_split
-H5Pget_stdio
-H5Pget_xfer
-
-
-        - -
-H5Pset_core
-H5Pset_family
-H5Pset_mpi
-H5Pset_sec2
-H5Pset_split
-H5Pset_stdio
-H5Pset_xfer
-
-
-        - -
-H5RAclose
-H5RAcreate
-H5RAopen
-H5RAread
-H5RAwrite
-H5Tget_member_dims
-H5Tinsert_array
-
-
-
- Note that the entire H5RA interface, an experimental interface for - ragged arrays, has been removed from the library. - - -

Functions with Changed Syntax

- The following functions have changed slightly. - -
-
H5Pget_buffer -
Return type has changed to hsize_t. -
H5Pset_buffer -
The type of the size parameter has changed - to hsize_t. -
H5Tconvert -
The type of the nelmts parameter has changed - to hsize_t. -
-
- -

Constants with Changed Values

- The values of the constants H5P_DEFAULT and - H5S_ALL have been changed from -2 - to 0. - These default values had to be special-cased in situations where - they could be returned to distinguish them from error values. - -
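-
- Code that refers to these defaults through the symbolic names, as in the
- hypothetical sketch below (not from the original document), is unaffected by
- the change in their numeric values; only code that hard-coded the literal -2
- needs attention.
-
 #include "hdf5.h"

 /* Sketch: pass the symbolic constants rather than literal values,
  * so the change from -2 to 0 is transparent to application code.  */
 herr_t read_whole_dataset(hid_t dset_id, int *buf)
 {
     return H5Dread(dset_id, H5T_NATIVE_INT,
                    H5S_ALL,       /* memory dataspace               */
                    H5S_ALL,       /* file dataspace                 */
                    H5P_DEFAULT,   /* default transfer property list */
                    buf);
 }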
-

  - -

Migration from Release 1.2.2 to Release 1.4.x

- - -

H5Tinsert_array

- The functionality of H5Tinsert_array has been replaced by - H5Tarray_create. - Here is an example of changing code from H5Tinsert_array - to H5Tarray_create. -
-V1.2.2
-{
-    struct tmp_struct {
-        int a;
-        float f[3];
-        double d[2][4];
-    };
-    size_t f_dims[1]={3};
-    size_t d_dims[2]={2,4};
-    hid_t compound_type;
-
-    compound_type=H5Tcreate(H5T_COMPOUND,sizeof(struct tmp_struct));
-    H5Tinsert(compound_type,"a",HOFFSET(struct tmp_struct,a),H5T_NATIVE_INT);
-    H5Tinsert_array(compound_type,"f",HOFFSET(struct tmp_struct,f),1,f_dims,NULL,H5T_NATIVE_FLOAT);
-    H5Tinsert_array(compound_type,"d",HOFFSET(struct tmp_struct,d),2,d_dims,NULL,H5T_NATIVE_DOUBLE);
-}
-
-V1.4.0
-{
-    struct tmp_struct {
-        int a;
-        float f[3];
-        double d[2][4];
-    };
-    hsize_t f_dims[1]={3};
-    hsize_t d_dims[2]={2,4};
-    hid_t compound_type;
-    hid_t array_type;
-
-    compound_type=H5Tcreate(H5T_COMPOUND,sizeof(struct tmp_struct));
-    H5Tinsert(compound_type,"a",HOFFSET(struct tmp_struct,a),H5T_NATIVE_INT);
-    array_type=H5Tarray_create(H5T_NATIVE_FLOAT,1,f_dims,NULL);
-    H5Tinsert(compound_type,"f",HOFFSET(struct tmp_struct,f),array_type);
-    H5Tclose(array_type);
-    array_type=H5Tarray_create(H5T_NATIVE_DOUBLE,2,d_dims,NULL);
-    H5Tinsert(compound_type,"d",HOFFSET(struct tmp_struct,d),array_type);
-    H5Tclose(array_type);
-}
-
-
-
- - -

This and Prior Releases: The RELEASE.txt and HISTORY.txt Files

- - - The HDF5 source code, as distributed to users and developers, - contains two files that will be of interest to readers of this - document. Both files are located at the top level of the - source code tree and are duplicated here for your reference: -

-

-
RELEASE.txt -
Technical notes regarding the release, including - new features and the changes since the last release, - notes regarding new or revised utilities, - notes regarding alternative language APIs (Fortran90, C++), - bugs fixed since the last release, - platforms on which the release has been tested, and - known problems. - This is the file commonly known as "the release notes." -

-

HISTORY.txt -
A release-by-release history of the HDF5 library. - This file is a compiled set of the release notes - (i.e., the RELEASE.txt files) from prior releases. -
-
- - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- -Last modified: 3 March 2005 - - - - diff --git a/doc/html/ADGuide/H4toH5Mapping.doc b/doc/html/ADGuide/H4toH5Mapping.doc deleted file mode 100755 index 2f9340f..0000000 Binary files a/doc/html/ADGuide/H4toH5Mapping.doc and /dev/null differ diff --git a/doc/html/ADGuide/H4toH5Mapping.pdf b/doc/html/ADGuide/H4toH5Mapping.pdf deleted file mode 100644 index 548912b..0000000 Binary files a/doc/html/ADGuide/H4toH5Mapping.pdf and /dev/null differ diff --git a/doc/html/ADGuide/HISTORY.txt b/doc/html/ADGuide/HISTORY.txt deleted file mode 100644 index b6f2585..0000000 --- a/doc/html/ADGuide/HISTORY.txt +++ /dev/null @@ -1,3180 +0,0 @@ -HDF5 HISTORY -============ -This file contains history of the HDF5 libraries releases - -CONTENTS - -13. Release Information for hdf5-1.4.5 -12. Release Information for hdf5-1.4.4 -11. Release Information for hdf5-1.4.3 -10. Release Information for hdf5-1.4.2 -9. Release Information for hdf5-1.4.1 -8. Release Information for hdf5-1.4.0 -7. Release Information for hdf5-1.2.2 -6. Release Information for hdf5-1.2.1 -5. Release Information for hdf5-1.2.0 -4. Changes from Release 1.0.0 to Release 1.0.1 -3. Changes from the Beta 1.0.0 Release to Release 1.0.0 -2. Changes from the Second Alpha 1.0.0 Release to the Beta 1.0.0 Release -1. Changes from the First Alpha 1.0.0 Release to the - Second Alpha 1.0.0 Release - -[Search on the string '%%%%' for per-release section breaks.] - ------------------------------------------------------------------------ -%%%%1.4.5%%%% Release Information for hdf5-1.4.5 (02/February/03) - - -13. Release information for HDF5 version 1.4.5 -============================================================================== - - -INTRODUCTION - -This document describes the differences between HDF5-1.4.4 and -HDF5-1.4.5, and contains information on the platforms tested and -known problems in HDF5-1.4.5. For additional information check the -HISTORY.txt file in the HDF5 source. - -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information, see the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - - -CONTENTS - -- New Features -- Bug Fixes since HDF5-1.4.4 -- Performance Improvements -- Documentation -- Platforms Tested -- Supported Configuration Features -- Known Problems - - -New Features -============ - o Configuration - ================ - * Added "unofficial support" for building with a C++ compiler (or at least - not failing badly when building with a C++ compiler). QAK - 2003/01/09 - * Added "unofficial support" for AIX 64bits. See INSTALL for configure - details. AKC - 2002/08/29 - * Added "--with-dmalloc" flag, to easily enable support for the 'dmalloc' - debugging malloc implementation. QAK - 2002/07/15 - - o Library - ========= - o General - --------- - * Allow scalar dataspaces to be used for parallel I/O. QAK - 2002/11/05 - * Added environment variable "HDF5_DISABLE_VERSION_CHECK", which disables - the version checking between the header files and the library linked - into an application if set to '1'. This should be used with caution, - mis-matched headers and library binaries can cause _serious_ problems. - QAK - 2002/10/15 - * Partially fixed space allocation inefficiencies in the file by - improving our algorithms for re-using freed space. QAK - 2002/08/27 - * API tracing has been improved. 
Nested API calls don't screw up the - output format; function call and return event times can be logged; - total time spent in each function can be logged. The following - HDF5_DEBUG environment variable words affect tracing: - trace -- turn on/off basic tracing - ttimes -- turn on tracing and report event times and - time spent in each API function. - ttop -- turn on tracing but display only top-level - API calls. - - o APIs - ------ - * Several missing fortran APIs have been added to the library: - - h5get_libversion_f h5tget_member_index_f h5dget_storage_size_f - h5check_version_f h5tvlen_create_f h5dvlen_get_max_len_f - h5garbage_collect_f h5dwrite_vl_f - h5dont_atexit_f h5dread_vl_f - - Functions h5dvlen_get_max_len_f, h5dwrite_vl_f, and h5dread_vl_f support - VL Length C APIs functionality for integer, real and string datatypes. - See HDF5 Reference Manual and HDF5 FORTRAN90 User's Notes for more - information and for the functions description. - - o Parallel library - ================== - * The MPI-posix virtual file driver makes gpfs_fcntl() hints to tell - the underlying GPFS file system to avoid prefetching byte range - tokens if USE_GPFS_HINTS is defined when this file is compiled. - This temporary solution is intended to be removed once the HDF5 - API supports the necessary functionality that makes it possible - for this sort of thing do be done at a higher software layer. - RPM - 2002/12/03 - * Added MPI-posix VFL driver. This VFL driver uses MPI functions to - coordinate actions, but performs I/O directly with POSIX sec(2) - (i.e. open/close/read/write/etc.) calls. This driver should _NOT_ - be used to access files that are not on a parallel filesystem. - The following API functions were added: - herr_t H5Pset_fapl_mpiposix(hid_t fapl_id, MPI_Comm comm); - herr_t H5Pget_fapl_mpiposix(hid_t fapl_id, MPI_Comm *comm/*out*/); - QAK - 2002/07/15 - - - - o Support for new platforms and languages - ========================================= - * C++ API now works on the Origin2000 (IRIX6.5.14.) BMR - 2002/11/14 - - - o Misc. - ========================================= - HDF5 1.4.5 works with Portland Group Compilers (pgcc, pgf90 and pgCC - version 4.0-2) on Linux 2.4 - - -Bug Fixes since HDF5-1.4.4 Release -================================== - * H5Fopen without the H5F_ACC_CREAT flag should not succeed in creating - a new file with the 'core' VFL driver. QAK - 2003/01/24 - * Corrected metadata caching bug in parallel I/O which could cause hangs - when chunked datasets were accessed with independent transfer mode. - QAK - 2003/01/23 - * Allow opening objects with unknown object header messages. - QAK - 2003/01/21 - * Added improved error assertion for nil VL strings. It return error - stack instead of a simple assertion. SLU - 2002/12/16 - * Fixed h5dump bug(cannot dump data and datatype) for VL string. - SLU - 2002/11/18 - * Fixed error condition where "none" selections were not being handled - correctly in serial & parallel. QAK - 2002/10/29 - * Fixed problem where optimized hyperslab routines were incorrectly - invoked for parallel I/O operations in collective mode. QAK - 2002/07/22 - * Fixed metadata corruption problem which could occur when many objects - are created in a file during parallel I/O. QAK - 2002/07/19 - * Fixed minor problem with configuration when users specified /usr/include - and /usr/lib for the --with-* options that some compilers can't - handle. BW - 2003/01/23 - - - -Documentation -============= - New PDF files are not available for this release. 
- - -Platforms Tested -================ - - AIX 5.1 (32 and 64-bit) C for AIX Compiler, Version 6 - xlf 8.1.0.2 - poe 3.2.0.11 - Cray T3E sn6606 2.0.6.08 Cray Standard C Version 6.6.0.1.3 - Cray Fortran Version 3.6.0.0.12 - Cray SV1 10.0.1. 0 Cray Standard C Version 6.6.0.1.3 - Cray Fortran Version 3.6.0.0.12 - Cray T90IEEE 10.0.1.01u Cray Standard C Version 6.4.0.2.3 - Cray Fortran Version 3.4.0.3 - FreeBSD 4.7 gcc 2.95.4 - g++ 2.95.5 - HP-UX B.11.00 HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1.3m - F90 MIPSpro 7.3.1.3m (64 only) - Linux 2.4.18 gcc 3.2.1 - g++ 3.2.1 - Intel(R) C++ Version 6.0 - Intel(R) Fortran Compiler Version 6.0 - PGI compilers (pgcc, pgf90, pgCC) version 4.0-2 - pgf90 3.2-4 - OSF1 V5.1 Compaq C V6.4-014 - Compaq Fortran X5.4A-1684 - gcc version 3.0 for C++ - SunOS 5.7 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.7) WorkShop Compilers 5.0 98/12/15 C++ 5.0 - WorkShop Compilers 5.0 98/10/25 - FORTRAN 90 2.0 Patch 107356-04 - SunOS 5.8/32 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - Patch 109503-07 2001/08/11 - Sun WorkShop 6 update 1 C++ 5.2 Patch - 109508-04 2001/07/11 - SunOS 5.8/64 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - Patch 109503-07 2001/08/11 - Sun WorkShop 6 update 1 C++ 5.2 Patch - 109508-04 2001/07/11 - TFLOPS r1.0.4 v4.3.3 i386 pgcc Rel 3.1-4i with mpich-1.2.4 with - local modifications - IA-32 Linux 2.4.9 gcc 2.96 - Intel(R) C++ Version 7.0 - Intel(R) Fortran Compiler Version 7.0 - - IA-64 Linux 2.4.16 ia64 gcc version 2.96 20000731 - Intel(R) C++ Version 7.0 - Intel(R) Fortran Compiler Version 7.0 - Windows 2000 (NT5.0) MSVC++ 6.0 - DEC Visual Fortran 6.0 - Windows XP .NET - Windows NT4.0 Code Warrior 6.0 - MAC OS X Darwin 6.2 - gcc and g++ Apple Computer, Inc. 
GCC - version 1161, based on gcc version 3.1 - - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - - - Platform C C F90 F90 C++ Shared zlib - parallel parallel libraries (5) - Solaris2.6 y n y n y y y - Solaris2.7 64-bit y y (1) y y (1) y y y - Solaris2.7 32-bit y y (1) y y (1) y y y - Solaris2.8 64-bit y n y y (1) y y y - Solaris2.8 32-bit y n y y (1) y y y - IRIX6.5 y y (1) n n n y y - IRIX64_6.5 64-bit y y (2) y y y y y - IRIX64_6.5 32-bit y y (2) n n n y y - HPUX11.00 y y (1) y n n y y - OSF1 v5.1 y n y n y y y - T3E (6) y n y n n n y - SV1 y n y n n n y - T90 IEEE y n y n n n y - TFLOPS n y (1) n n n n y - AIX-5.1 32-bit y y y y y n y - AIX-5.1 64-bit y y y y y n y - WinXP (7) y n n n y y y - WinNT/2000 y n y n y y y - WinNT CW y n n n n n y - Mac OS X 10.2 y n n n y y y - FreeBSD y y (1) n n y y y - Linux 2.2 y y (1) y y (1) y y y - Linux 2.4 gcc (3) y y (1) y n y y y - Linux 2.4 Intel (3) y n y n n n y - Linux 2.4 PGI (3) y n y n y n y - Linux 2.4 IA32 y n y n n n y - Linux 2.4 IA64 y n y n n n y - - - Platform static- Thread- SRB GASS STREAM- - exec safe VFD - Solaris2.6 x y n n y - Solaris2.7 64-bit x y n n y - Solaris2.7 32-bit x y n n y - Solaris2.8 64-bit x n n n y - Solaris2.8 32-bit x y n n y - IRIX6.5 x n n n y - IRIX64_6.5 64-bit x y n y y - IRIX64_6.5 32-bit x y n y y - HPUX11.00 x n n n y - OSF1 v5.1 y n n n y - T3E (6) y n n n y - SV1 y n n n y - T90 IEEE y n n n y - TFLOPS y n n n n - AIX-5.1 32-bit y n n n y - AIX-5.1 64-bit y n n n y - WinXP (7) dna n n n n - WinNT/2000 dna n n n n - WinNT CW dna n n n n - Mac OS X 10.2 y n n n y - FreeBSD y y n n y - Linux 2.2 y y n n y - Linux 2.4 gcc (3) y y n n y - Linux 2.4 Intel (3) y n n n y - Linux 2.4 PGI (3) y n n n y - Linux 2.4 IA32 y n n n y - Linux 2.4 IA64 y n n n y - - Notes: (1) Using mpich 1.2.4. - (2) Using mpt and mpich 1.2.4. - (3) Linux 2.4 with GNU, Intel, and PGI compilers. - (4) No HDF4-related tools. - (5) Shared libraries are provided only for the C library, - except on Windows where they are provided for all languages. - (6) Debug mode only. - (7) Binaries only; source code for this platform is not being - released at this time. - - -Known Problems -============== - - * On Linux 2.4 IA64, Fortran test fails for h5dwrite_vl_f - for integer and real base datatypes. - - * When fortran library is built with Intel compilers, compilation - for fflush1.f90, fflush2.f90 and fortanlib_test.f90 will fail - complaining about EXEC function. Comment the call to EXEC subroutine - in each program, or get a patch for the HDF5 Fortran source code. - - * Fortran external dataset test fails on Linux 2.4 with pgf90 compiler. - - * On Windows, h5dump may abort printing if a VL string is longer than 4096 - bytes due to a compiler problem. It'll be fixed in v1.6 release. - - * Datasets or attributes which have a variable-length string datatype are - not printing correctly with h5dump and h5ls. - - * When a dataset with the variable-length datatype is overwritten, - the library can develop memory leaks that cause the file to become - unnecessarily large. This is planned to be fixed in the next release. - - * On the SV1, the h5ls test fails due to a difference between the - SV1 printf precision and the printf precision on other platforms. 
- - * The h5dump tests may fail to match the expected output on some - platforms (e.g. SP2 parallel, Windows) where the error messages - directed to "stderr" do not appear in the "right order" with output - from stdout. This is not an error. - - * The --enable-static-exec configure flag fails to compile for HP-UX - 11.00 platforms. - - * The executables are always dynamic on IRIX64 6.5(64 and n32) and - IRIX 6.5 even if they are configured with --enable-static-exec. - - * IRIX 6.5 fails to compile if configured with --enable-static-exec. - - * The executables are always dynamic on Solaris 2.7 ans 2.8(64 and n32) - even if they are configured with --enable-static-exec. - - * The HDF5_MPI_OPT_TYPES optimization code in the parallel HDF5 will cause - a hang in some cases when chunked storage is used. This is now set to - be off by default. One may turn it on by setting the environment - variable HDF5_MPI_OPT_TYPES to a non-zero value such as 1. - - * On OSF1 v5.1 and IA32 h5dumpgentst program that generates test files - for h5dump, gives segmentation fault. - - * On Windows platforms, C and Fortran tests fail with the debug DLL version - of the Library if built from all_withf90.zip file. - - * On Cray T3E (sn6606 2.0.6.08 unicosmk CRAY T3E) with Cray Standard C Version 6.6.0.1.3 - compiler optimization causes errors in many HDF5 Library tests. Use -g -h zero flags - to build HDF5 Library. - - * On Cray SV1 10.0.1. 0 datatype convertion test fails. Please check HDF FTP site - if patch is available. We will try to provide one in the nearest future. - - * For configuration, building and testing with Intel and PGI compilers see - corresponding section in INSTALL file. - - -%%%%1.4.4%%%% Release Information for hdf5-1.4.4 (02/July/02) - -12. Release information for HDF5 version 1.4.4 -============================================================================== - -INTRODUCTION - -This document describes the differences between HDF5-1.4.3 and -HDF5-1.4.4, and contains information on the platforms tested and -known problems in HDF5-1.4.4. For more details check the HISTORY.txt -file in the HDF5 source. - -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information, see the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - - -CONTENTS - -- New Features -- Bug Fixes since HDF5-1.4.3 -- Performance Improvements -- Documentation -- Platforms Tested -- Supported Configuration Features -- Known Problems - - -New Features -============ - o Configuration - ================ - * The H4 to H5 tools have been removed from the main source and placed - in a separate package. You can get these tools from the HDF ftp site - (ftp://hdf.ncsa.uiuc.edu/). The "--with-hdf4" command-line option - during configure is no longer valid. BW - 2002/06/25 - - o Library - ========= - o General - --------- - * Fill-value forward-compatibility with release 1.5 was added. SLU - - 2002/04/11 - * A new query function H5Tget_member_index has been added for compound - and enumeration data types. This function retrieves a member's index - by name. SLU - 2002/04/05 - * Added serial multi-gigabyte file size test. "test/big -h" shows - the help page. AKC - 2002/03/29 - - o APIs - ------ - * The F90 subroutines h5dwrite_f, h5dread_f, h5awrite_f, and h5aread_f - were overloaded with a "dims" argument of type INTEGER(HSIZE_T) to - specify the size of the array. 
We recommend using these subroutines - with the new type; module subroutines that accept "dims" as an i - INTEGER array of size 7 will be deprecated in release 1.6. - EIP - 2002/05/06 - - o Performance - ------------- - * Added internal "small data" aggregation, which can reduce the number of - actual I/O calls made, improving performance. QAK - 2002/06/05 - * Improved internal metadata aggregation, which can reduce the number of - actual I/O calls made, improving performance. Additionally, this can - reduce the size of files produced. QAK - 2002/06/04 - * Improved internal metadata caching, which can reduce the number of - actual I/O calls made by a substantial amount, improving - performance. QAK - 2002/06/03 - - - o Parallel library - ================== - * Fixed bug in parallel I/O routines where a collective I/O which used - MPI derived types, followed by an independent I/O would cause the library - to hang. QAK 2002/06/24 - * Added environment variable flag to control whether creating MPI derived - types is preferred or not. This can affect performance, depending on - which way the MPI-I/O library is optimized. The default is set to - prefer MPI derived types for collective raw data transfers; setting the - HDF5_MPI_PREFER_DERIVED_TYPES environment variable to "0" (i.e.: - "setenv HDF5_MPI_PREFER_DERIVED_TYPES 0") changes the preference to avoid - using them whenever possible. QAK - 2002/06/19 - * Changed MPI I/O routines to avoid creating MPI derived types (and thus - needing to set the file view) for contiguous selections within datasets. - This should result in some performance improvement for those types of - selections. QAK - 2002/06/18 - * Changed MPI type support for collective I/O to be enabled by default. - This can be disabled by setting the HDF5_MPI_OPT_TYPES environment - variable to the value "0". QAK - 2002/06/14 - * Allowed chunks in chunked datasets to be cached when parallel file is - opened for read-only access (bug #709). QAK - 2002/06/10 - * Changed method for allocating chunked dataset blocks to only allocate - blocks that don't already exist, instead of attempting to create all the - blocks all the time. This improves performance for chunked - datasets. QAK - 2002/05/17 - * Allowed the call to MPI_File_sync to be avoided when the file is going to - immediately be closed, improving performance. QAK - 2002/05/13 - * Allowed the metadata writes to be shared among all processes, easing the - burden on process 0. QAK - 2002/05/10 - - - o Tools - ======= - * h5redeploy utility was added. It updates HDF5 compiler tools - after the HDF5 software has been installed in a new location. - - - o Support for new platforms and languages - ========================================= - * Parallel Fortran Library works now on HP-UX B.11.00 Sys V. - EIP - 2002/05/06 - * Intel C++ and F90 compilers Version 6.0 are supported on Linux 2.4. - * Intel C++ compilers Version 6.0 are supported on Windows 2000. - - - o Misc. - ========================================= - * zlib has been moved out of the Windows source release. Users should go to - the ZLIB homepage(http://www.zlib.org) to download the corresponding - zlib library. - * The Windows binary release is built with the old version of the zlib - library. We expect users to use zlib 1.1.4 to build with the source - release. - * In the Windows-specific install document, we specify how to test backward - compatibility. However, in this release, we are not testing the backward - compatibility of HDF5. 
- - -Bug Fixes since HDF5-1.4.3 Release -================================== - * Fixed bug in chunking routines where they were using internal allocation - free routines, instead of malloc/free, preventing user filters from - working correctly. Chunks are now allocated/freed with malloc/free and - so should the chunks in user filters. QAK 2002/06/18 - * Fixed bug where regular hyperslab selection could get incorrectly - transferred when the number of elements in a row did not fit evenly - into the buffer provided. QAK 2002/06/12 - * Fixed bug (#499) which allowed an "empty" compound or enumerated datatype - (one with no members) to be used to create a dataset or to be committed - to a file. QAK - 2002/06/11 - * Fixed bug (#777) which allowed a compound datatype to be inserted into - itself. QAK - 2002/06/10 - * Fixed bug (#789) where creating 1-D dataset region reference caused the - library to go into infinite loop. QAK - 2002/06/10 - * Fixed bug (#699, fix provided by a user) where a scalar dataspace was - written to the file and then subsequently queried with the - H5Sget_simple_extent_type function; type was reported as H5S_SIMPLE - instead of H5S_SCALAR. EIP - 2002/06/04 - * Clear symbol table node "dirty" flag when flushing symbol tables to - disk, to reduce I/O calls made & improve performance. QAK - 2002/06/03 - * Fixed bug where an object's header could get corrupted in certain - obscure situations when many objects were created in the - file. QAK - 2002/05/31 - * Fixed bug where read/write intent in file IDs created with H5Freopen - was not being kept the same as the original file. QAK - 2002/05/14 - * Fixed bug where selection offsets were not being used when iterating - through point and hyperslab selections with - H5Diterate(). QAK - 2002/04/29 - * Fixed bug where the data for several level deep nested compound & - variable-length datatypes used for datasets were getting corrupted when - written to the file. QAK - 2002/04/17 - * Fixed bug where selection offset was being ignored for certain hyperslab - selections when optimized I/O was being performed. QAK - 2002/04/02 - * Fixed limitation in h5dumper with object names which reached over 1024 - characters in length. We can now handle arbitrarily larger sizes for - object names. BW - 2002/03/29 - * Fixed bug where variable-length string type did not behave as a - string. SLU - 2002/03/28 - * Fixed bug in H5Gget_objinfo() which was not setting the 'fileno' - of the H5G_stat_t struct. QAK - 2002/03/27 - * Fixed data corruption bug in hyperslab routines when contiguous - hyperslab that spans entire dimension and is larger than type - conversion buffer is attempted to be read. QAK - 2002/03/26 - - -Performance Improvements -======================== - This release of the HDF5 library has been extensively tuned to improve -performance, especially to improve parallel I/O performance. - Most of the specific information for particular performance improvements -is mentioned in the "New Features" and "Bug Fixes since HDF5-1.4.3" sections -of this document, but in general, the library should make fewer and larger -I/O requests when accessing a file. Additionally, improvements to the parallel -I/O portions of the library should have reduced the communications and barriers -used in various internal algorithms, improving the performance of the library. - However, with the extensive changes to some portions of the library that -were required for these improvements, some errors or unanticipated results may -have been introduced also. 
Please report any problems encountered to our -support team at hdfhelp@ncsa.uiuc.edu. - Hopefully these improvements will benefit all HDF5 applications, but if -there are particular I/O patterns that appear to be slower than necessary, -please send e-mail to hdfhelp@ncsa.uiuc.edu with a sample program showing the -problem behavior; we will look into the issue to see if it is possible to -address it. - - -Documentation -============= - * Documentation was updated for the hdf5-1.4.4 release. - * A new "HDF5 User's Guide" is under development. See - http://hdf.ncsa.uiuc.edu/HDF5/doc_dev_snapshot/H5_NewUG/current/. - * A "Parallel HDF5 Tutorial" is available at - http://hdf.ncsa.uiuc.edu/HDF5/doc/Tutor/. - * The "HDF5 Tutorial" is not distributed with this release. It is - available at http://hdf.ncsa.uiuc.edu/HDF5/doc/Tutor/. - - -Platforms Tested -================ - - AIX 4.3.3.0 (IBM SP powerpc) xlc 5.0.2.0 - mpcc_r 5.0.2.0 - xlf 07.01.0000.0002 - mpxlf 07.01.0000.0002 - AIX 4.3 (IBM SP RS6000) C for AIX Compiler, Version 5.0.2.0 - xlf 7.1.0.2 - poe 3.1.0.12 (includes mpi) - AIX 5.1 xlc 5.0.2.0 - xlf 07.01.0000.0002 - mpcc_r 5.0.2.0; mpxlf_r 07.01.0000.0002 - Cray T3E sn6711 2.0.5.57 Cray Standard C Version 6.5.0.3 - Cray Fortran Version 3.5.0.4 - Cray SV1 10.0.1.1 Cray Standard C Version 6.5.0.3 - Cray Fortran Version 3.5.0.4 - FreeBSD 4.6 gcc 2.95.4 - g++ 2.95.4 - HP-UX B.10.20 HP C HP92453-01 A.10.32.30 - HP F90 v2.3 - HP-UX B.11.00 HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - HP-UX B.11.00 SysV HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - HP MPI [not a product] (03/24/2000) B6060BA - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1.3m - F90 MIPSpro 7.3.1.3m (64 only) - Linux 2.4.9-31smp gcc 2.95.3 - g++ 2.95.3 - Intel(R) C++ Version 6.0 - Intel(R) Fortran Compiler Version 6.0 - MPICH 1.2.2 - Linux 2.2.18smp gcc 2.95.2 - gcc 2.95.2 with mpich 1.2.1 - g++ 2.95.2 - pgf90 3.2-4 - OSF1 V5.1 Compaq C V6.4-014 - Compaq Fortran V5.5-1877-48BBF - gcc version 3.0 for C++ - SunOS 5.7 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.7) WorkShop Compilers 5.0 98/12/15 C++ 5.0 - WorkShop Compilers 5.0 98/10/25 - FORTRAN 90 2.0 Patch 107356-04 - SunOS 5.8/32 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - Patch 109503-07 2001/08/11 - Sun WorkShop 6 update 1 C++ 5.2 Patch - 109508-04 2001/07/11 - SunOS 5.8/64 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - Patch 109503-07 2001/08/11 - Sun WorkShop 6 update 1 C++ 5.2 Patch - 109508-04 2001/07/11 - TFLOPS r1.0.4 v4.2.2 i386 pgcc Rel 3.1-4i with mpich-1.2.3 with - local modifications - IA-32 Linux 2.4.9 cc Intel 5.0.1 - gcc 2.96 - Intel(R) C++ Version 6.0 - Intel(R) Fortran Compiler Version 6.0 - - IA-64 Linux 2.4.16 ia64 gcc version 2.96 20000731 - Intel(R) C++ Version 6.0 - Intel(R) Fortran Compiler Version 6.0 - Windows 2000 (NT5.0) MSVC++ 6.0 - DEC Visual Fortran 6.0 - Windows NT4.0 MSVC++ 6.0 - DEC Visual Fortran 6.0 - Windows NT4.0 Code Warrior 6.0 - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - ( ) = footnote appears below second table - - - Platform C C F90 F90 C++ Shared zlib Tools - parallel parallel libraries(5) - Solaris2.6 y n y n y y y y - Solaris2.7 64 y y (1) y n y y y y - Solaris2.7 32 y y (1) y n y y y y - Solaris2.8 64 y n y n y y y y - Solaris2.8 32 
y n y n y y y y - IRIX6.5 y y (1) n n n y y y - IRIX64_6.5 64 y y (2) y y n y y y - IRIX64_6.5 n32 y y (2) n n n y y y - HPUX10.20 y n y n n y y y - HPUX11.00 y n y n n y y y - HPUX11 SysV y y y y n y y y - OSF1 v5.1 y n y n y y y y - T3E y y y y n n y y - SV1 y n y n n n y y - TFLOPS n y (1) n n n n y y (4) - AIX-4.3 y y y y y n y y - AIX-5.1 y y y y n n y y - WinNT/2000 y n y n y y y y - WinNT CW y n n n n n y y - FreeBSD y n n n y y y y - Linux 2.2 y y (1) y n y y y y - Linux 2.4 y y (1) n n y y y y - Linux 2.4 Intel(6) y n y n y n y y - Linux 2.4 IA32 y n y n n n y y - Linux 2.4 IA64 y n y n n n y y - - - Platform 1.2 static- Thread- SRB GASS STREAM- - compatibility exec safe VFD - Solaris2.6 y x y n n y - Solaris2.7 64 y x y n n y - Solaris2.7 32 y x y n n y - Solaris2.8 64 y y n n n y - Solaris2.8 32 y x y n n y - IRIX6.5 y x n n n y - IRIX64_6.5 64 y x y n y y - IRIX64_6.5 n32 y x y n y y - HPUX10.20 y y n n n y - HPUX11.00 y x n n n y - HPUX11 SysV y x n n n y - OSF1 v5.1 y y n n n y - T3E y y n n n y - SV1 y y n n n y - TFLOPS y y n n n n - AIX-4.3 y y (3) n n n y - AIX-5.1 y y n n n y - WinNT/2000 y y n n n n - WinNT CW n n n n n n - FreeBSD y y y n n y - Linux 2.2 y y y n n y - Linux 2.4 y y y n n y - Linux 2.4 Intel(6) y y n n n y - Linux 2.4 IA32 y y n n n y - Linux 2.4 IA64 y y n n n y - - - Footnotes: (1) Using mpich. - (2) Using mpt and mpich. - (3) When configured with static-exec enabled, tests fail in - serial mode. - (4) No HDF4-related tools. - (5) Shared libraries are provided only for the C library, - except on Windows where they are provided for all languages. - (6) Linux 2.4 with Intel compilers. - - -Known Problems -============== - - * Datasets or attributes which have a variable-length string datatype are - not printing correctly with h5dump and h5ls. - - * When a dataset with the variable-length datatype is overwritten, - the library can develop memory leaks that cause the file to become - unnecessarily large. This is planned to be fixed in the next release. - - * On the SV1, the h5ls test fails due to a difference between the - SV1 printf precision and the printf precision on other platforms. - - * The h5dump tests may fail to match the expected output on some - platforms (e.g. SP2 parallel, Windows) where the error messages - directed to "stderr" do not appear in the "right order" with output - from stdout. This is not an error. - - * The --enable-static-exec configure flag fails to compile for HP-UX - 11.00 platforms. - - * The executables are always dynamic on IRIX64 6.5(64 and n32) and - IRIX 6.5 even if they are configured with --enable-static-exec. - - * IRIX 6.5 fails to compile if configured with --enable-static-exec. - - * The HDF5_MPI_OPT_TYPES optimization code in the parallel HDF5 will cause - a hang in some cases when chunked storage is used. This is now set to - be off by default. One may turn it on by setting the environment - variable HDF5_MPI_OPT_TYPES to a non-zero value such as 1. - - * On IA32 and IA64 systems, if you use a compiler other than GCC (such as - Intel's ecc or icc compilers), you will need to modify the generated - "libtool" program after configuration is finished. On or around line 104 - of the libtool file, there are lines which look like: - - # How to pass a linker flag through the compiler. - wl="" - - Change these lines to this: - - # How to pass a linker flag through the compiler. 
- wl="-Wl," - - * To build the Fortran library using Intel compilers, one has to - x modify the source code in the fortran/src directory to remove the - !DEC and !MS compiler directives. - x The build will fail in the fortran/test directory and then in the - fortran/examples directory; to proceed, edit the work.pcl files in - those directories to contain two lines - - work.pc - ../src/work.pc - - * To build the Fortran library on IA64 use - setenv CC "ecc -DIA64" - setenv F9X "efc -cl,work.pcl" - before running configure and see the steps described above. - - -%%%%1.4.3%%%% Release Information for hdf5-1.4.3 (18/Februaru/02) - -11. Release information for HDF5 version 1.4.3 -============================================================================== - - -INTRODUCTION - -This document describes the differences between HDF5-1.4.2 and -HDF5-1.4.3, and contains information on the platforms tested and -known problems in HDF5-1.4.2. For more details check the HISTORY.txt -file in the HDF5 source. - -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information look at the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - - -CONTENTS - -- New Features -- Bug Fixes since HDF5-1.4.2 -- Documentation -- Platforms Tested -- Supported Configuration Features -- Known Problems - - -New Features -============ - o Configuration - ================ - * Can use just enable-threadsafe if the C compiler has built-in pthreads - support. - - o Library - ========= - o General - --------- - * Added a new test to verify the information provided by the configure - command. - * Changed internal error handling macros to reduce code size of library by - about 10%. - - o APIs - ------ - * Changed prototype for H5Awrite from: - H5Awrite(hid_t attr_id, hid_t type_id, void *buf) - to: - H5Awrite(hid_t attr_id, hid_t type_id, const void *buf) - * The H5Pset_fapl_split() accepts raw and meta file names similar to the - syntax of H5Pset_fapl_multi() in addition to what it used to accept. - - C++ API: - * Added operator= to class PredType - * Add the overloaded member function Attribute::getName to return - the attribute name's length as in C API. Note that the current - Attribute::getName, that returns "string", is still available. - * Following the change in the C library, the corresponding C++ API - is changed from: - void Attribute::write( const DataType& mem_type, void *buf ) - to: - void Attribute::write( const DataType& mem_type, const void *buf ) - - o Performance - ------------- - * Added perform programs to test the HDF5 library performance. Programs - are installed in directory perform/. - * Improved performance of byte-swapping during data conversions. - * Improved performance of single, contiguous hyperslabs when reading or - writing. - * Added support to read/write portions of chunks directly, if they are - uncompressed and too large to cache. This should speed up I/O on chunked - datasets for a few more cases. -QAK, 1/31/02 - - o Parallel Library - ================== - * Parallel C HDF5 now works on HP-UX platforms, Compaq clusters, - Linux clusters, Cplants (alpha-linux clusters). - - o Tools - ======= - * A helper script called ``h5cc'', which helps compilation of HDF5 - programs, is now distributed with HDF5. See the reference manual - for information on how to use this feature. 
- * The H5Dumper can now dump comments associated with groups. -WCW 01-05-02 - - o Support for new platforms and languages - ========================================= - * HDF5 C++ Library is supported on Windows platforms (shared and static) - * HDF5 F90 shared library is supported on Windows platforms. - * HDF5 C Library is supported on IA32 and IA64 platforms. - - - -Bug Fixes since HDF5-1.4.2 Release -================================== - - * Fixed a bug when reading chunked datasets where the edge of the dataset - would be incorrectly detected and generate an assertion failure. - * Fixed a bug where reading an entire dataset wasn't being handled - optimally when the dataset had unlimited dimensions. Dataset is read - in a single low-level I/O now, instead of being broken into separate - pieces internally. - * Fixed a bug where reading or writing chunked data which needed datatype - conversion could result in data values getting corrupted. - * Fixed a bug where appending a point selection to the current selection - would not actually append the point when there were no points defined - currently. - * Fixed a bug where 'or'ing a hyperslab with a 'none' selection would - fail. Now adds that hyperslab as the first hyperlab in the selection. - * Fixed a bug in the 'big' test where quota limits weren't being detected - properly if they caused close() to fail. - * Fixed a bug in internal B-tree code where a B-tree was not being copied - correctly. - * Fixed an off-by-one error in H5Sselect_valid when hyperslab selections - which would allow hyperslab selections which overlapped the edge of the - selection by one element as valid. - * Fixed the internal macros used to encode & decode file metadata, to avoid - an unaligned access warning on IA64 machines. - * Corrected behavior of H5Tinsert to not allow compound datatype fields to - be inserted past the end of the datatype. - * Retired the DPSS virtual file driver (--with-gridstorage configure - option). - * Fixed bug where variable-length datatypes for attributes was not working - correctly. - * Fixed bug where raw data re-allocated from the free-list would sometimes - overlap with the metadata accumulator and get corrupted. QAK - 1/23/02 - * Fixed bug where a preempted chunk in the chunk data could still be - used by an internal pointer and cause an assertion failure or core - dump. QAK - 2/13/02 - * Fixed bug where non-zero fill-value was not being read correctly from - certain chunked datasets when using an "all" or contiguous hyperslab - selection. QAK - 2/14/02 - - -Documentation -============= - * Documentation was updated for the hdf5-1.4.3 release. - * A new "HDF5 User's Guide" is under development. See - http://hdf.ncsa.uiuc.edu/HDF5/doc_dev_snapshot/H5_NewUG/current/. 
- * Parallel Tutorial is available at http://hdf.ncsa.uiuc.edu/HDF5/doc/Tutor/ - - -Platforms Tested -================ - - AIX 4.3.3.0 (IBM SP powerpc) xlc 5.0.2.0 - mpcc_r 5.0.2.0 - xlf 07.01.0000.0002 - mpxlf 07.01.0000.0002 - AIX 4.3 (IBM SP RS6000) C for AIX Compiler, Version 5.0.2.0 - xlf 7.1.0.2 - poe 3.1.0.12 (includes mpi) - Cray T3E sn6711 2.0.5.57 Cray Standard C Version 6.5.0.3 - Cray Fortran Version 3.5.0.4 - Cray SV1 10.0.0.8 Cray Standard C Version 6.5.0.3 - Cray Fortran Version 3.5.0.4 - FreeBSD 4.5 gcc 2.95.3 - g++ 2.95.3 - HP-UX B.10.20 HP C HP92453-01 A.10.32.30 - HP F90 v2.3 - HP-UX B.11.00 HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - HP-UX B.11.00 SysV HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - HP MPI [not a product] (03/24/2000) B6060BA - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1.2m - Linux 2.4.4 gcc 2.95.3 - g++ 2.95.3 - Linux 2.2.18smp gcc 2.95.2 - gcc 2.95.2 with mpich 1.2.1 - g++ 2.95.2 - pgf90 3.2-4 - OSF1 V5.1 Compaq C V6.3-028 - Compaq Fortran V5.4-1283 - SunOS 5.7 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.7) Workshop Compilers 5.0 98/12/15 C++ 5.0 - Workshop Compilers 5.0 98/10/25 - FORTRAN 90 2.0 Patch 107356-04 - SunOS 5.8/32 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - Patch 109503-07 2001/08/11 - Sun WorkShop 6 update 1 C++ 5.2 Patch - 109508-04 2001/07/11 - SunOS 5.8/64 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - Patch 109503-07 2001/08/11 - Sun WorkShop 6 update 1 C++ 5.2 Patch - 109508-04 2001/07/11 - TFLOPS r1.0.4 v4.0.8 i386 pgcc Rel 3.1-4i with mpich-1.2.1 with - local modifications - IA-32 Linux 2.2.10smpx cc Intel 5.0.1 - egcs-2.91.66 - IA-64 Linux 2.4.16 ia64 gcc version 2.96 20000731 - Intel(R) C++ Itanium(TM) Compiler - for the Itanium(TM)-based applications, - Version 6.0 Beta, Build 20010905 - Windows 2000 (NT5.0) MSVC++ 6.0 - DEC Visual Fortran 6.0 - Windows NT4.0 MSVC++ 6.0 - DEC Visual Fortran 6.0 - Windows NT4.0 Code Warrior 6.0 - Windows 98 MSVC++ 6.0 - DEC Visual Fortran 6.0 - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - ( ) = footnote appears below second table - - - Platform C C F90 F90 C++ Shared zlib Tools - parallel parallel libraries - (5) - Solaris2.7 y y (1) y n y y y y - Solaris2.8 64 y n y n y y y y - Solaris2.8 32 y n y n y y y y - IA-64 y n n n n n y y - IRIX6.5 y y (1) n n n y y y - IRIX64_6.5 64 y y (2) y y n y y y - IRIX64_6.5 32 y y (2) n n n y y y - HPUX10.20 y n y n n y y y - HPUX11.00 y y y n n y y y - HPUX11 SysV y y y n n y y y - DECOSF y n y n y y y y - T3E y y y y n n y y - SV1 y n y n n n y y - TFLOPS y y (1) n n n n y y (4) - AIX-4.3 SP2 y y y y n n y n - AIX-4.3 SP3 y y y y y n y n - Win2000 y n y n y (6) y y y - Win98 y n y n y (6) y y y - WinNT y n y n y (6) y y y - WinNT CW y n n n n n y y - FreeBSD y n n n y y y y - Linux 2.2 y y (1) y n y y y y - Linux 2.4 y y (1) n n y y y y - - - Platform 1.2 static- Thread- SRB GASS STREAM- - compatibility exec safe VFD - Solaris2.7 n x y n n y - Solaris2.8 64 n y n n n y - Solaris2.8 32 n x n n n y - IA-64 n n n n n y - IRIX6.5 n x y n n y - IRIX64_6.5 64 n x y n y y - IRIX64_6.5 32 n x y n y y - HPUX10.20 n y n n n y - HPUX11.00 n x n n n y - HPUX11 SysV n x n n n y - DECOSF n y n n n y - T3E n y n n n y - SV1 n y n n n y - TFLOPS n y n n n n - AIX-4.3 SP2 n y 
(3) n n n y - AIX-4.3 SP3 n y n n n y - Win2000 n y n n n n - Win98 n y n n n n - WinNT n y n n n n - WinNT CW n n n n n n - FreeBSD n y y n n y - Linux 2.2 n y y n n y - Linux 2.4 n y y n n y - - - Footnotes: (1) Using mpich. - (2) Using mpt and mpich. - (3) When configured with static-exec enabled, tests fail - in serial mode. - (4) No HDF4-related tools. - (5) Shared libraries are provided only for the C library. - (6) Exception of (5): DLL is available for C++ API on Windows - - -Known Problems -============== - - * Datasets or attributes which have a variable-length string datatype are - not printing correctly with h5dump and h5ls. - - * When a dataset with the variable-legth datatype is overwritten, - the library can develop memory leaks that cause the file to become - unnecessarily large. This is planned to be fixed in the next release. - - * On the SV1, the h5ls test fails due to a difference between the - SV1 printf precision and the printf precision on other platforms. - - - * The h5dump tests may fail to match the expected output in some - platforms (e.g. SP2 parallel, Windows) where the error messages - directed to "stderr" do not appear in the "right order" with output - from stdout. This is not an error. - - * The --enable-static-exec configure flag fails to compile for HP-UX - 11.00 platforms. - - * The executables are always dynamic on IRIX64 6.5(64 and n32) and - IRIX 6.5 even if they are configured with --enable-static-exec. - - * IRIX 6.5 fails to compile if configured with --enable-static-exec. - - * The HDF5_MPI_OPT_TYPES optimization code in the parallel HDF5 will cause - a hang in some cases when chunked storage is used. This is now set to - be off by default. One may turn it on by setting environment variable - HDF5_MPI_OPT_TYPES to a non-zero value such as 1. - - * On IA64 systems one has to use -DIA64 compilation flag to compile - h4toh5 and h5toh4 utilites. After configuration step manually modify - Makefile in the tools/h4toh4 and tools/h5toh4 directories to add - -DIA64 to the compilation flags. - - * On IA32 ansd IA64 systems, if you use a compiler other than GCC - (such as Intel's ecc compiler), you will need to modify the generated - "libtool" program after configuration is finished. On or around line 102 - of the libtool file, there are lines which look like: - - # How to pass a linker flag through the compiler. - wl="" - - change the lines to this: - - # How to pass a linker flag through the compiler. - wl="-Wl," - - -%%%%1.4.2%%%% Release Information for hdf5-1.4.2 (31/July/01) - -10. Release Information for hdf5-1.4.2 -================================================================= - - -INTRODUCTION - -This document describes the differences between HDF5-1.4.1 and -HDF5-1.4.2, and contains information on the platforms tested and -known problems in HDF5-1.4.2. - -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information look at the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - - -CONTENTS - -- New Features -- Bug Fixes since HDF5-1.4.1 -- Documentation -- Platforms Tested -- Supported Configuration Features -- Known Problems - - -New Features -============ - - * File sizes greater than 2GB are now supported on Linux systems with - version 2.4.x or higher kernels. - * Added a global string variable H5_lib_vers_info_g which holds the - HDF5 library version information. 
This can be used to identify - an hdf5 library or hdf5 application binary. - Also added a verification of the consistency between H5_lib_vers_info_g - and other version information in the source code. - * Parallel HDF5 now runs on the HP V2500 and HP N4000 machines. - * F90 API: - - Added an additional parameter "dims" to the h5dread_f/h5dwrite_f and - h5aread_f/h5awrite_f subroutines. This parameter is a 1-D array - of size 7 and contains the sizes of the data buffer dimensions. - This change enables portability between Windows and UNIX platforms. - In previous versions of the F90 APIs, the data buffer parameters of - the above functions were declared as assumed-shape arrays, which - were passed to the C functions by a descriptor. There is no - portable means, however, of passing descriptors from F90 to C, - causing portability problems between Windows and UNIX and among - UNIX platforms. With this change, the data buffers are assumed-size - arrays, which can be portably passed to the C functions. - * F90 static library is available on Windows platforms. - See INSTALL_Windows_withF90.txt for details. - * F90 APIs are available on HPUX 11.00 and 10.20 and IBM SP platforms. - * H5 <-> GIF converter has been added. This is available under - tools/gifconv. The converter also supports the creation of animated - gifs. - * Verified correct operation of the library on Solaris 2.8 in both 64-bit and - 32-bit compilation modes. See the INSTALL document for instructions on - compiling the distribution with 64-bit support. - * Added support for the Metrowerks Code Warrior compiler for Windows. - * For the H4->H5 converter utility, added a new option to choose not to convert - HDF4 specified attributes (reference number, class) into HDF5 attributes. - * Added support for chunking and compression in SDS and image in the H4->H5 converter. - Currently HDF5 only supports gzip compression, so by default an HDF4 file - with any other compression method will be converted into an HDF5 file with - gzip compression. - * Corrected the order of reading the HDF4 image array in H4->H5 conversion. - * Added new parallel hdf5 tests in t_mpi. The new test checks whether the - filesystem or the MPI-IO can really handle greater than 2GB files. - If it fails, it prints an informational message only without failing the - test. - * Added a parallel HDF5 example, examples/ph5example.c, to illustrate - the basic way of using parallel HDF5. - * Added a new public macro, H5_VERS_INFO, which is a string holding - the HDF5 library version information. This string is also compiled - into all HDF5 binary code, which helps to identify the version information - of the binary code. One may use the Unix strings command on the binary - file and look for the pattern "HDF5 library version". - * Added new checking in H5check_version() to verify that the five HDF5 version - information macros (H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE, - H5_VERS_SUBRELEASE and H5_VERS_INFO) are consistent. - - -Bug Fixes since HDF5-1.4.1 Release -================================== - - * Fixed a bug with non-zero userblock sizes causing raw data not to be - written correctly. - * Fixed problems with the Pablo build and linking with non-standard MPI I/O. - * Fixed the build on Linux systems with the --enable-static-exec flag. It now - works correctly. - * IMPORTANT: Fixed a file metadata corruption bug which could cause - metadata loss in certain situations. - * The allocation by alignment (H5Pset_alignment) feature code somehow - got dropped in some 1.3.x version.
Re-implemented it with "new and - improved" algorithm. It keeps track of "wasted" file-fragment in - the free-list too. - * Removed limitation that the data transfer buffer size needed to be - set for datasets whose dimensions were too large for the 'all' - selection code to handle. Any size dimensioned datasets should be - handled correctly now. - * Changed behavior of H5Tget_member_type to correctly emulate HDF5 v1.2.x - when --enable-hdf5v1_2 configure flag is enabled. - * Added --enable-linux-lfs flag to allow more control over whether to - enable or disable large file support on Linux. - * Fixed various bugs releated to SDS dimensional scale conversions in H4->H5 - converter. - * Fixed a bug to correctly convert HDF4 objects with fill value into HDF5. - * Fixed a bug of H5pubconf.h causing repeated definitions if it is included - more than once. hdf5.h now includes H5public.h which includes - H5pubconf.h. Applications should #include hdf5.h which handles multiple - inclusion correctly. - * Fixed H5FDmpio.h to be C++ friendly by making Parallel HDF5 API's to be - external to C++. - * Fixed a bug in H5FD_mpio_flush() that might result in negative file seek - if both MPIO and Split-file drivers are used together. - - - -Documentation -============= - - * The H5T_conv_t and H5T_cdata_t structures are now properly defined - in the H5Tregister entry in the "H5T" section of the "HDF5 Reference - Manual" and described in detail in section 12, "Data Conversions," in - the "Datatypes" chapter of the "HDF5 User's Guide." - * The new tools h52gif and gif2h5 have been added to the "Tools" section - of the Reference Manual. - * A "Freespace Management" section has been added to the "Performance" - chapter of the User's Guide. - * Several user-reported bugs have been fixed since Release 1.4.1. - * The "HDF5 Image and Palette Specification" (in the "HDF5 Application - Developer's Guide") has been heavily revised. Based on extensive user - feedback and input from visualization software developers, Version 1.2 - of the image specification is substantially different from prior - versions. 
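   As a usage note for the re-implemented alignment feature mentioned in the
   bug-fix list above: H5Pset_alignment() is applied to a file access property
   list before the file is created or opened. A minimal C fragment, with
   illustrative threshold and alignment values, might look like this:

        hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file_id;

        /* Align any object of 4 KB or larger on 64 KB boundaries
         * (values are illustrative only). */
        H5Pset_alignment(fapl_id, (hsize_t)4096, (hsize_t)65536);

        file_id = H5Fcreate("aligned.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);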
- - -Platforms Tested -================ - - AIX 4.3.3.0 (IBM SP powerpc) xlc 3.6.6.0 - mpcc_r 3.6.6.0 - xlf 07.01.0000.0002 - mpxlf 07.01.0000.0002 - AIX 4.3 (IBM SP RS6000) C for AIX Compiler, Version 5.0.2.0 - xlf 7.1.0.2 - poe 2.4.0.14 (includes mpi) - Cray T3E sn6711 2.0.5.49a Cray Standard C Version 6.5.0.1 - Cray SV1 10.0.0.2 Cray Standard C Version 6.5.0.1 - Cray Fortran Version 3.5.0.1 - FreeBSD 4.3 gcc 2.95.3 - g++ 2.95.3 - HP-UX B.10.20 HP C HP92453-01 A.10.32.30 - HP F90 v2.3 - HP-UX B.11.00 HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - HP-UX B.11.00 SysV HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1.2m - Linux 2.4.4 gcc 2.95.3 - g++ 2.95.3 - Linux 2.2.18smp gcc 2.95.2 - gcc 2.95.2 with mpich 1.2.1 - g++ 2.95.2 - pgf90 3.2-4 - OSF1 V4.0 DEC-V5.2-040 on Digital UNIX V4.0 (Rev 564) - Digital Fortran 90 V4.1-270 - SunOS 5.6 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.6) WorkShop Compilers 5.0 98/10/25 FORTRAN 90 - 2.0 Patch 107356-04 - SunOS 5.7 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.7) Workshop Compilers 5.0 98/12/15 C++ 5.0 - Workshop Compilers 5.0 98/10/25 FORTRAN 90 - 2.0 Patch 107356-04 - SunOS 5.8/32 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - 2000/09/11 - Sun WorkShop 6 update 1 C++ 5.2 2000/09/11 - SunOS 5.8/64 Sun WorkShop 6 update 1 C 5.2 2000/09/11 - (Solaris 2.8) Sun WorkShop 6 update 1 Fortran 95 6.1 - 2000/09/11 - Sun WorkShop 6 update 1 C++ 5.2 2000/09/11 - TFLOPS r1.0.4 v4.0.7 i386 pgcc Rel 3.1-4i with mpich-1.2.1 with - local modifications - Windows 2000 (NT5.0) MSVC++ 6.0 - Windows NT4.0 MSVC++ 6.0 - DEC Visual Fortran 6.0 - Windows NT4.0 Code Warrior 6.0 - Windows 98 MSVC++ 6.0 - DEC Visual Fortran 6.0 - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - ( ) = footnote appears below second table - - - Platform C C F90 F90 C++ Shared zlib Tools - parallel parallel libraries - (5) - Solaris2.6 y n y n y y y y - Solaris2.7 y y (1) y n y y y y - Solaris2.8 64 y n n n y y y y - Solaris2.8 32 y n y n y y y y - IRIX6.5 y y (1) n n n y y y - IRIX64_6.5 64 y y (2) y y n y y y - IRIX64_6.5 32 y y (2) n n n y y y - HPUX10.20 y n y n n y y y - HPUX11.00 y n y n n y y y - HPUX11 SysV y n y n n y y y - DECOSF y n y n n y y y - T3E y y y y n n y y - SV1 y n y n n n y y - TFLOPS y y (1) n n n n y y (4) - AIX-4.3 SP2 y y y y n n y n - AIX-4.3 SP3 y y y y n n y n - Win2000 y n n n n y y y - Win98 y n y n n y y y - WinNT y n y n n y y y - WinNT CW y n n n n n y y - FreeBSD y n n n y y y y - Linux 2.2 y y (1) y n y y y y - Linux 2.4 y y (1) n n y y y y - - - Platform 1.2 static- Thread- SRB GASS STREAM- - compatibility exec safe VFD - Solaris2.6 y x n n n y - Solaris2.7 y x y n n y - Solaris2.8 64 y y n n n y - Solaris2.8 32 y x n n n y - IRIX6.5 y x y n n y - IRIX64_6.5 64 y x n n n y - IRIX64_6.5 32 y x n n n y - HPUX10.20 y y n n n y - HPUX11.00 y x n n n y - HPUX11 SysV y x n n n y - DECOSF y y n n n y - T3E y y n n n y - SV1 y y n n n y - TFLOPS y y n n n n - AIX-4.3 SP2 y y (3) n n n y - AIX-4.3 SP3 y y n n n y - Win2000 y y n n n n - Win98 n y n n n n - WinNT y y n n n n - WinNT CW n n n n n n - FreeBSD y y n n n y - Linux 2.2 y y y n n y - Linux 2.4 y y y n n y - - - Footnotes: (1) Using mpich. - (2) Using mpt and mpich. 
- (3) When configured with static-exec enabled, tests fail - in serial mode. - (4) No HDF4-related tools. - (5) Shared libraries are provided only for the C library. - - -Known Problems -============== - - * When a dataset with the variable-legth datatype is overwritten, - the library can develop memory leaks that cause the file to become - unnecessarily large. This is planned to be fixed in the next release. - - * On the SV1, the h5ls test fails due to a difference between the - SV1 printf precision and the printf precision on other platforms. - - * The h5dump tests may fail to match the expected output in some - platforms (e.g. SP2 parallel, Windows) where the error messages - directed to "stderr" do not appear in the "right order" with output - from stdout. This is not an error. - - * The --enable-static-exec configure flag fails to compile for HP-UX - 11.00 platforms. - - * The executables are always dynamic on IRIX64 6.5(64 and n32) and - IRIX 6.5 even if they are configured with --enable-static-exec. - - * IRIX 6.5 fails to compile if configured with --enable-static-exec. - - * For 24-bit image conversion from H4->H5, the current conversion is - not consistent with HDF5 image specification. - - * In some cases, and SDS with an UNLIMITED dimension that has not - been written (current size = 0) is not converted correctly. - - * After "make install" or "make install-doc" one may need to reload - the source from the tar file before doing another build. - - * The HDF5_MPI_OPT_TYPES optimization code in the parallel HDF5 will cause - a hang in some cases when chunked storage is used. This is now set to - be off by default. One may turn it on by setting environment variable - HDF5_MPI_OPT_TYPES to a non-zero value such as 1. - -%%%%1.4.1%%%% Release Information for hdf5-1.4.1 (April/01) - -9. Release Information for hdf5-1.4.1 (April/01) -===================================================================== - - - - HDF5 Release 1.4.1 - - -INTRODUCTION - -This document describes the differences between HDF5-1.4.0 and -HDF5-1.4.1, and contains information on the platforms tested and -known problems in HDF5-1.4.1. - -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information look at the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - - -CONTENTS - -- New Features -- Bug Fixes since HDF5-1.4.0 -- Documentation -- Platforms Tested -- Supported Configuration Features -- Known Problems - - -New Features -============ - - * XML output option for h5dump utility. - - A new option --xml to output data in XML format has been added. The - XML output contains a complete description of the file, marked up in - XML. - - The XML conforms to the HDF5 Document Type Definition (DTD), which - is available at: - - http://hdf.ncsa.uiuc.edu/DTDs/HDF5-File.dtd - - The XML output is suitable for use with other tools, including the - Java Tools: - - http://hdf.ncsa.uiuc.edu/java-hdf5-html - - -Bug Fixes since HDF5-1.4.0 Release -================================== - - * h4toh5 utility: conversion of images is fixed - - Earlier releases of the h4toh5 utility produced images that did not - correctly conform to the HDF5 Image and Palette Specification. - - http://hdf.ncsa.uiuc.edu/HDF5/doc/ImageSpec.html - - Several required HDF5 attributes are omitted, and the dataspace - is reversed (i.e., the ht. 
and width of the image dataset is - incorrectly described.) For more information, please see: - - http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageDetails.htm - - * Fixed bug with contiguous hyperslabs not being detected, causing - slower I/O than necessary. - * Fixed bug where non-aligned hyperslab I/O on chunked datasets was - causing errors during I/O - * The RCSID string in H5public.h was causing the C++ compiling problem - because when it was included multiple times, C++ did not like - multiple definitions of the same static variable. All occurance of - RCSID definition are removed since we have not used it consistently - before. - - -Documentation -============= - - PDF and Postscript versions of the following documents are available - for this release: - Document Filename - -------- -------- - Introduction to HDF5 H5-R141-Introduction.pdf - HDF5 Reference Manual H5-R141-RefManual.pdf - C++ APIs to HDF5 documents H5-R141-Cplusplus.pdf - Fortran90 APIs to HDF5 documents H5-R141-Fortran90.pdf - - PDF and Postscript files containing H5-R141-DocSet.pdf - all of the above H5-R141-DocSet.ps - - These files are not included in this distribution, but are available - via the Web or FTP at the following locations: - http://hdf.ncsa.uiuc.edu/HDF5/doc/PSandPDF/ - ftp://ftp.ncsa.uiuc.edu/HDF/HDF5/docs/ - - While these documents are labeled Release 1.4.1, they describe - Release 1.4.0 as well. - - -Platforms Tested -================ - -Due to the nature of this release only C, C++ libraries and tools were tested. - - AIX 4.3.3.0 (IBM SP powerpc) xlc 3.6.6 - mpcc_r 3.6.6 - Cray T3E sn6711 2.0.5.47 Cray Standard C Version 6.5.0.0 - Cray SV1 10.0.0.8 Cray Standard C Version 6.5.0.0 - FreeBSD 4.3 gcc 2.95.2 - HP-UX B.10.20 HP C HP92453-01 A.10.32.30 - HP-UX B.11.00 HP C HP92453-01 A.11.01.20 - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1m - Linux 2.2.18smp gcc-2.95.2 - g++ 2.95.2 - OSF1 V4.0 DEC-V5.2-040 - Digital Fortran 90 V4.1-270 - SunOS 5.6 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.6) - - SunOS 5.7 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.7) Workshop Compilers 5.0 98/12/15 C++ 5.0 - TFLOPS r1.0.4 v4.0 mpich-1.2.1 with local changes - Windows NT4.0, 2000 (NT5.0) MSVC++ 6.0 - Windows 98 MSVC++ 6.0 - - -Supported Configuration Features Summary -======================================== - - * See "Supported Configuration Features Summary" section for the HDF5 - 1.4.0 release in the HISTORY.txt file. - -Known Problems -============== - - * The h5dump tests may fail to match the expected output in some - platforms (e.g. SP2 parallel, Windows) where the error messages - directed to "stderr" do not appear in the "right order" with output - from stdout. This is not an error. - - * The --enable-static-exec configure flag fails to compile for HP-UX - 11.00 platforms. - - * The executable are always dynamic on IRIX64 6.5(64 and n32) and - IRIX 6.5 even if they are configured with --enable-static-exec. - - * The shared library failed compilation on IRIX 6.5. - - * After "make install" or "make install-doc" one may need to reload the source - from the tar file before doing another build. - - * See "Known problems" section for the HDF5 1.4.0 release in the - HISTORY.txt file. - -%%%%1.4.0%%%% Release Information for hdf5-1.4.0 (2/22/01) - -8. 
Release Information for hdf5-1.4.0 -=================================================================== - - HDF5 Release 1.4.0 - - -INTRODUCTION - -This document describes the differences between HDF5-1.2.0 and -HDF5-1.4.0, and contains information on the platforms tested and -known problems in HDF5-1.4.0. For more details check the HISTORY.txt -file in the HDF5 source. - -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information look at the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - - -CONTENTS - -- New Features -- h4toh5 Utility -- F90 Support -- C++ Support -- Pablo Support -- Bug Fixes since HDF5-1.2.0 -- Bug Fixes since HDF5-1.4.0-beta2 -- Bug Fixes since HDF5-1.4.0 -- Documentation -- Platforms Tested -- Supported Configuration Features -- Known Problems - - -New Features -============ - * The Virtual File Layer, VFL, was added to replace the old file - drivers. It also provides an API for user defined file drivers. - * New features added to snapshots. Use 'snapshot help' to see a - complete list of features. - * Improved configure to detect if MPIO routines are available when - parallel mode is requested. - * Added Thread-Safe support. Phase I implemented. See: - - http://hdf.ncsa.uiuc.edu/HDF5/papers/mthdf/MTHDFpaper.htm - - for more details. - * Added data sieve buffering to raw data I/O path. This is enabled - for all VFL drivers except the mpio & core drivers. Setting the - sieve buffer size is controlled with the new API function, - H5Pset_sieve_buf_size(), and retrieved with H5Pget_sieve_buf_size(). - * Added new Virtual File Driver, Stream VFD, to send/receive entire - HDF5 files via socket connections. - * As parts of VFL, HDF-GASS and HDF-SRB are also added to this - release. To find out details, please read INSTALL_VFL file. - * Increased maximum number of dimensions for a dataset (H5S_MAX_RANK) - from 31 to 32 to align with HDF4 & netCDF. - * Added 'query' function to VFL drivers. Also added 'type' parameter to - VFL 'read' & 'write' calls, so they are aware of the type of data - being accessed in the file. Updated the VFL document also. - * A new h4toh5 utility, to convert HDF4 files to analogous HDF5 files. - * Added a new array datatype to the datatypes which can be created. - Removed "array fields" from compound datatypes (use an array datatype - instead). - * Parallel HDF5 works correctly with mpich-1.2.1 on Solaris, SGI, Linux. - * You can now install the HDF5 documentation using the - ``make install-doc'' command. The documentation is installed in the - $(prefix)/doc directory where $(prefix) is the prefix specified by - the (optional) ``--prefix'' flag during configuration. - * HDF5 can operate correctly in the OpenMP environment in a limited way. - Check doc/html/TechNotes/openmp-hdf5.html for details. - - -h4toh5 Utility -============== - The h4toh5 utility is a new utility that converts an HDF4 file to an - HDF5 file. For details, see the document, "Mapping HDF4 Objects to - HDF5 Objects": - http://hdf.ncsa.uiuc.edu/HDF5/papers/H4-H5MappingGuidelines.pdf - - Known Bugs: - - The h4toh5 utility produces images that do not correctly conform - to the HDF5 Image and Palette Specification. - - http://hdf.ncsa.uiuc.edu/HDF5/doc/ImageSpec.html - - Several required HDF5 attributes are omitted, and the dataspace - is reversed (i.e., the ht. 
and width of the image dataset is - incorrectly described.) For more information, please see: - - http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageDetails.html - - This bug has been fixed for the snapshot of hdf5 1.4 release. March 12th,2001 - - Known Limitations of the h4toh5 release - --------------------------------------------- - - 1. Error handlings - - h4toh5 utility will print out an error message when an error occurs. - - 2. String Datatype - - HDF4 has no 'string' type. String valued data are usually defined as - an array of 'char' in HDF4. The h4toh5 utility will generally map - these to HDF5 'String' types rather than array of char, with the - following additional rules: - - * For the data of an HDF4 SDS, image, and palette, if the data is - declared 'DFNT_CHAR8' it will be assumed to be integer and will - be an H5T_INTEGER type. - * For attributes of any HDF4 object, data of type 'DFNT_CHAR8' - will be converted to an HDF5 'H5T_STRING' type. - * For an HDF4 Vdata, it is difficult to determine whether data - of type 'DFNT_CHAR8' is intended to be bytes or characters. The - h4toh5 utility will consider them to be C characters, and will - convert them to an HDF5 'H5T_STRING' type. - - - 3. Compression, Chunking and External Storage - - Chunking is supported, but compression and external storage is not. - - An HDF4 object that uses chunking will be converted to an HDF5 file - with analogous chunked storage. - - An HDF4 object that uses compression will be converted to an - uncompressed HDF5 object. - - An HDF4 object that uses external storage will be converted to an - HDF5 object without external storage. - - 4. Memory Use - - This version of the h4toh5 utility copies data from HDF4 objects - in a single read followed by a single write to the HDF5 object. For - large objects, this requires a very large amount of memory, which may - be extremely slow or fail on some platforms. - - Note that a dataset that has only been partly written will - be read completely, including uninitialized data, and all the - data will be written to the HDF5 object. - - 5. Platforms - - The h4toh5 utility requires HDF5-1.4.0 and HDF4r1.4 - - h4toh5 utility has been tested on all platforms listed below (see - section "Platforms Tested") except TFLOPS. - - -F90 Support -=========== - This is the first release of the HDF5 Library with fully integrated - F90 API support. The Fortran Library is created when the - --enable-fortran flag is specified during configuration. - - Not all F90 subroutines are implemented. Please refer to the HDF5 - Reference Manual for more details. - - F90 APIs are available for the Solaris 2.6 and 2.7, Linux, DEC UNIX, - T3E, SV1 and O2K (64 bit option only) platforms. The Parallel version of - the HDF5 F90 Library is supported on the O2K and T3E platforms. - - Changes since the last prototype release (July 2000) - ---------------------------------------------------- - * h5open_f and h5close_f must be called instead of h5init_types and - h5close_types. - - * The following subroutines are no longer available: - - h5pset_xfer_f - h5pget_xfer_f - h5pset_mpi_f - h5pget_mpi_f - h5pset_stdio_f - h5pget_stdio_f - h5pset_sec2_f - h5pget_sec2_f - h5pset_core_f - h5pget_core_f - h5pset_family_f - h5pget_family_f - - * The following functions have been added: - - h5pset_fapl_mpio_f - h5pget_fapl_mpio_f - h5pset_dxpl_mpio_f - h5pget_dxpl_mpio_f - - * In the previous HDF5 F90 releases, the implementation of object - references and dataset region references was not portable. 
This - release introduces a portable implementation, but it also introduces - changes to the read/write APIs that handle references. If object or - dataset region references are written or read to/from an HDF5 file, - h5dwrite_f and h5dread_f must use the extra parameter, n, for the - buffer size: - - h5dwrite(read)_f(dset_id, mem_type_id, buf, n, hdferr, & - ^^^ - mem_space_id, file_space_id, xfer_prp) - - For other datatypes the APIs were not changed. - - -C++ Support -=========== - This is the first release of the HDF5 Library with fully integrated - C++ API support. The HDF5 C++ library is built when the --enable-cxx - flag is specified during configuration. - - Check the HDF5 Reference Manual for available C++ documentation. - - C++ APIs are available for Solaris 2.6 and 2.7, Linux, and FreeBSD. - - -Pablo Support -============= - This version does not allow proper building of the Pablo-instrumented - version of the library. A version supporting the pablo build is - available on the Pablo Website at - www-pablo.cs.uiuc.edu/pub/Pablo.Release.5/HDFLibrary/hdf5_v1.4.tar.gz - - -Bug Fixes since HDF5-1.2.0 -========================== - -Library -------- - * The function H5Pset_mpi is renamed as H5Pset_fapl_mpio. - * Corrected a floating point number conversion error for the Cray J90 - platform. The error did not convert the value 0.0 correctly. - * Error was fixed which was not allowing dataset region references to - have their regions retrieved correctly. - * Corrected a bug that caused non-parallel file drivers to fail in - the parallel version. - * Added internal free-lists to reduce memory required by the library - and H5garbage_collect API function - * Fixed error in H5Giterate which was not updating the "index" - parameter correctly. - * Fixed error in hyperslab iteration which was not walking through the - correct sequence of array elements if hyperslabs were staggered in a - certain pattern - * Fixed several other problems in hyperslab iteration code. - * Fixed another H5Giterate bug which was causes groups with large - numbers of objects in them to misbehave when the callback function - returned non-zero values. - * Changed return type of H5Aiterate and H5A_operator_t typedef to be - herr_t, to align them with the dataset and group iterator functions. - * Changed H5Screate_simple and H5Sset_extent_simple to not allow - dimensions of size 0 with out the same dimension being unlimited. - * QAK - 4/19/00 - Improved metadata hashing & caching algorithms to - avoid many hash flushes and also remove some redundant I/O when - moving metadata blocks in the file. - * The "struct(opt)" type conversion function which gets invoked for - certain compound datatype conversions was fixed for nested compound - types. This required a small change in the datatype conversion - function API. - * Re-wrote lots of the hyperslab code to speed it up quite a bit. - * Added bounded garbage collection for the free lists when they run - out of memory and also added H5set_free_list_limits API call to - allow users to put an upper limit on the amount of memory used for - free lists. - * Checked for non-existent or deleted objects when dereferencing one - with object or region references and disallow dereference. - * "Time" datatypes (H5T_UNIX_D*) were not being stored and retrieved - from object headers correctly, fixed now. - * Fixed H5Dread or H5Dwrite calls with H5FD_MPIO_COLLECTIVE requests - that may hang because not all processes are transfer the same amount - of data. (A.K.A. 
prematured collective return when zero amount data - requested.) Collective calls that may cause hanging is done via the - corresponding MPI-IO independent calls. - * If configure with --enable-debug=all, couple functions would issue - warning messages to "stderr" that the operation is expensive time-wise. - This messed up applications (like testings) that did not expect the - extra output. It is changed so that the warning will be printed only - if the corresponding Debug key is set. - -Configuration -------------- - * The hdf5.h include file was fixed to allow the HDF5 Library to be - compiled with other libraries/applications that use GNU autoconf. - * Configuration for parallel HDF5 was improved. Configure now attempts - to link with libmpi.a and/or libmpio.a as the MPI libraries by - default. It also uses "mpirun" to launch MPI tests by default. It - tests to link MPIO routines during the configuration stage, rather - than failing later as before. One can just do "./configure - --enable-parallel" if the MPI library is in the system library. - * Added support for pthread library and thread-safe option. - * The libhdf5.settings file shows the correct machine byte-sex. - * Added option "--enable-stream-vfd" to configure w/o the Stream VFD. - For Solaris, added -lsocket to the LIBS list of libraries. - -Tools ------ - * h5dump now accepts both short and long command-line parameters: - -h, --help Print a usage message and exit - -B, --bootblock Print the content of the boot block - -H, --header Print the header only; no data is displayed - -i, --object-ids Print the object ids - -V, --version Print version number and exit - -a P, --attribute=P Print the specified attribute - -d P, --dataset=P Print the specified dataset - -g P, --group=P Print the specified group and all members - -l P, --soft-link=P Print the value(s) of the specified soft link - -o F, --output=F Output raw data into file F - -t T, --datatype=T Print the specified named data type - -w #, --width=# Set the number of columns - - P - is the full path from the root group to the object. - T - is the name of the data type. - F - is a filename. - # - is an integer greater than 1. - * A change from the old way command line parameters were interpreted - is that multiple attributes, datasets, groups, soft-links, and - object-ids cannot be specified with just one flag but you have to - use a flag with each object. I.e., instead of doing this: - - h5dump -a /attr1 /attr2 foo.h5 - - do this: - - h5dump -a /attr1 -a /attr2 foo.h5 - - The cases are similar for the other object types. - * h5dump correctly displays compound datatypes. - * Corrected an error in h5toh4 which did not convert the 32bits - int from HDF5 to HDF4 correctly for the T3E platform. - * h5dump correctly displays the committed copy of predefined types - correctly. - * Added an option, -V, to show the version information of h5dump. - * Fixed a core dumping bug of h5toh4 when executed on platforms like - TFLOPS. - * The test script for h5toh4 used to not able to detect the hdp - dumper command was not valid. It now detects and reports the - failure of hdp execution. - * Merged the tools with the 1.2.2 branch. Required adding new - macros, VERSION12 and VERSION13, used in conditional compilation. - Updated the Windows project files for the tools. - * h5dump displays opaque and bitfield data correctly. - * h5dump and h5ls can browse files created with the Stream VFD - (eg. "h5ls :"). 
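   For example, under the new option syntax the short and long forms of a
   parameter are interchangeable; the two invocations below (with an
   illustrative file and dataset name) are equivalent:

        h5dump -d /dset1 foo.h5
        h5dump --dataset=/dset1 foo.h5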
- * h5dump has a new feature "-o " which outputs the raw data - of the dataset into ascii text file . - * h5toh4 used to converts hdf5 strings type to hdf4 DFNT_INT8 type. - Corrected to produce hdf4 DFNT_CHAR type instead. - * h5dump and h5ls displays array data correctly. - - -Bug Fixes since HDF5-1.4.0-beta2 -================================ - * Fixed a bug in the conversion from a little endian double to a big - endian float in some special cases. - * Corrected configuration error which was not including compression - support correctly. - * Cleaned up lots of warnings. - * Changed a few h5dump command line switches and added long versions of - the switches. - * Changed parameters for H5Tconvert, H5Pset_bufer and H5Pget_buffer from - size_t to hsize_t - * Fixed fairly obscure bug in hyperslab I/O which could (in rare cases) - not copy all the data during a transfer. - * Removed ragged array code from library. - * F90 library and module files are installed properly now on all supported - platforms. - - -Bug Fixes since HDF5-1.4.0 Release -================================== - - * Fixed bug with contiguous hyperslabs not being detected, causing - slower I/O than necessary. - * Fixed bug where non-aligned hyperslab I/O on chunked datasets was - causing errors during I/O - * Implemented XML support in h5dump. - - -Documentation -============= - * A new document summarizing the changes in the library leading up to - the current release has been added: - HDF5 Software Changes from Release to Release - This document is in the Application Developer's Guide and is of - particular interest to developers who must keep an application - synchronized with the library. - * The documentation for the Fortran90 and C++ APIs is linked to the - opening page of the Reference Manual. Fortran90 functions are - individually referenced from the corresponding C functions through- - out the Reference Manual. - * User's Guide and Reference Manual were updated to reflect changed - function syntax and to fix reported bugs. - * Functions that are new at this release were added to the Reference - Manual. - * Functions that have been removed from the library were removed from - the User's Guide and the Reference Manual. - * PostScript and PDF versions of the Release 1.4 document set are - not available at the time of Release 1.4.0. 
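   As a usage note for the XML support mentioned in the bug-fix list above,
   the option documented in the Release 1.4.1 notes earlier in this file is
   --xml; a typical invocation (file names are illustrative) redirects the
   XML description to a file:

        h5dump --xml foo.h5 > foo.xml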
- - -Platforms Tested -================ - AIX 4.3.3.0 (IBM SP powerpc) xlc 3.6.6 - mpcc_r 3.6.6 - Cray T3E sn6711 2.0.5.45 Cray Standard C Version 6.4.0.0 - Cray Fortran Version 3.4.0.2 - Cray SV1 sn9605 10.0.0.7 Cray Standard C Version 6.4.0.0 - Cray Fortran Version 3.4.0.2 - FreeBSD 4.2 gcc 2.95.2 - g++ 2.95.2 - HP-UX B.10.20 HP C HP92453-01 A.10.32.30 - HP-UX B.11.00 HP C HP92453-01 A.11.00.13 - IRIX 6.5 MIPSpro cc 7.30 - mpich-1.2.1 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1m - mpt.1.4.0.2 - mpich-1.2.1 - Linux 2.2.16-3smp gcc-2.95.2 - g++ 2.95.2 - pgf90 3.1-3 - mpich-1.2.1 - OSF1 V4.0 DEC-V5.2-040 - Digital Fortran 90 V4.1-270 - SunOS 5.6 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.6) WorkShop Compilers 5.0 99/10/25 Fortran 90 - 2.0 Patch 107356-04 - Workshop Compilers 5.0 98/12/15 C++ 5.0 - SunOS 5.7 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.7) WorkShop Compilers 5.0 99/10/25 Fortran 90 - 2.0 Patch 107356-04 - Workshop Compilers 5.0 98/12/15 C++ 5.0 - mpich-1.2.1 - SunOS 5.5.1 gcc-2.7.2 - (Solaris 2.5.1 (x86)) - TFLOPS r1.0.4 v4.0 mpich-1.2.1 with local changes - Windows NT4.0, 2000 (NT5.0) MSVC++ 6.0 - Windows 98 MSVC++ 6.0 - - -Supported Configuration Features Summary -======================================== - In the tables below - y = tested and supported - n = not supported or not working in this release - ( ) = footnote appears below table - - Platform C C F90 F90 C++ Shared zlib Tools - parallel parallel libraries - Solaris2.6 y n y n y y y y - Solaris2.7 y y (1) y n y y y y - Solarisx86 y n n n n y y y - IRIX6.5 y y (1) n n n n y y - IRIX64_6.5 64 y y (2) y y n y y y - IRIX64_6.5 32 y y (2) n n n y y y - HPUX10.20 y n n n n y y y - DECOSF y n y n n y y y - T3E y y y y n n y y - SV1 y n y n n n y y - TFLOPS y y (1) n n n n y y (4) - AIX-4.3 y y n n n n y n - Win2000 y n n n n y y y - Win98 y n n n n y y y - WinNT y n n n n y y y - FreeBSD y n n n y y y y - Linux y y (1) y n y y y y - - - Platform 1.2 static- Thread- SRB GASS STREAM- - compatibility exec safe VFD - Solaris2.6 y n n n n y - Solaris2.7 y n y n n y - Solarisx86 y n n n n y - IRIX6.5 y n y n n y - IRIX64_6.5 64 y n n n n y - IRIX64_6.5 32 y n n n n y - HPUX10.20 y y n n n y - DECOSF y y n n n y - T3E y y n n n y - SV1 y y n n n y - TFLOPS y y n n n n - AIX-4.3 y y (3) n n n y - Win2000 y y n n n n - Win98 y y n n n n - WinNT y y n n n n - FreeBSD y y n n n y - Linux y n y n n y - - Footnotes: (1) Using mpich. - (2) Using mpt and mpich. - (3) When configured with static-exec enabled, tests fail - in serial mode. - (4) No HDF4-related tools. - - -Known Problems -============== - * The stream-vfd test uses ip port 10007 for testing. If another - application is already using that port address, the test will hang - indefinitely and has to be terminated by the kill command. To try the - test again, change the port address in test/stream_test.c to one not - being used in the host. - - * The --enable-static-exec configure flag fails to compile for Solaris - platforms. This is due to the fact that not all of the system - libraries on Solaris are available in a static format. - - The --enable-static-exec configure flag also fails to correctly compile - on Linux platforms using the gcc-2.95.2 compiler. - - The --enable-static-exec configure flag also fails to correctly compile - on IBM SP2 platform for the serial mode. The parallel mode works fine - with this option. - - The compilation fails if configured with --enable-static-exec on IRIX 6.5. 
- - The executable files in hdf5/bin are dynamic-linked for IRIX64 6.5(64 and - n32 modes) and IRIX 6.5, even though they are compiled with static library. - - It is suggested that you don't use this option on these platforms - during configuration. - - * testhdf5 got bus error with configuration options --prefix and --with-hdf4 - on IRIX 6.5. - - * With the gcc 2.95.2 compiler, HDF 5 uses the `-ansi' flag during - compilation. The ANSI version of the compiler complains about not being - able to handle the `long long' datatype with the warning: - - warning: ANSI C does not support `long long' - - This warning is innocuous and can be safely ignored. - - * SunOS 5.6 with C WorkShop Compilers 4.2: Hyperslab selections will - fail if library is compiled using optimization of any level. - - * When building hdf5 tools and applications on windows platform, a linking - warning: defaultlib "LIBC" conflicts with use of other libs will appear - on debug version when running VC++6.0. This warning doesn't affect building - and testing hdf5 applications. We will continue investigating this. - - * h5toh4 converter fails two cases(tstr.h5 and tmany.h5) for release dll - version on windows 2000 and NT. The reason is possibly due to Windows NT - DLL convention on freeing memory. It seems that memory cannot be freed - across library or DLL. It is still under investigation. - - * HDF-GASS testings and testhdf5 in the test directory will get bus error if - the configured with --with-gass. - - * HDF-SRB testing got segmentation error on Solaris 2.7. - - * The Stream VFD was not tested yet under Windows. - It is not supported in the TFLOPS machine. - - * Shared library option is broken for IBM SP and some Origin 2000 platforms. - One needs to run ./configure with '--disable-shared --enable-static'. - - * The ./dsets tests failed in the TFLOPS machine if the test program, - dsets.c, is compiled with the -O option. The hdf5 library still works - correctly with the -O option. The test program works fine if it is - compiled with -O1 or -O0. Only -O (same as -O2) causes the test - program to fail. - - * Certain platforms give false negatives when testing h5ls: - - Solaris x86 2.5.1, Cray T3E and Cray J90 give errors during testing - when displaying object references in certain files. These are benign - differences due to the difference in sizes of the objects created on - those platforms. h5ls appears to be dumping object references - correctly. - - Cray J90 give errors during testing when displaying - some floating-point values. These are benign differences due to the - different precision in the values displayed and h5ls appears to be - dumping floating-point numbers correctly. - - * Before building HDF5 F90 Library from source on Crays (T3E and SV1) - replace H5Aff.f90, H5Dff.f90 and H5Pff.f90 files in the fortran/src - subdirectory in the top level directory with the Cray-specific files from - the ftp://ftp.ncsa.uiuc.edu/HDF/HDF5/hdf5-1.4.0/src/crayf90/ directory. - - * The h4toh5 utility produces images that do not correctly conform - to the HDF5 Image and Palette Specification. - - http://hdf.ncsa.uiuc.edu/HDF5/doc/ImageSpec.html - - Several required HDF5 attributes are omitted, and the dataspace - is reversed (i.e., the ht. and width of the image dataset is - incorrectly described.) For more information, please see: - - http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageDetails.htm - -%%%%1.2.2%%%% Release Information for hdf5-1.2.2 (6/23/00) - -7. 
Release Information for hdf5-1.2.2 -================================================================= -INTRODUCTION - -This document describes the differences between HDF5-1.2.1 and -HDF5-1.2.2, and contains information on the platforms where HDF5-1.2.2 -was tested and known problems in HDF5-1.2.2. - -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information look at the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - - -CONTENTS - -- Features Added since HDF5-1.2.1 -- Bug Fixes since HDF5-1.2.1 -- Known Problems -- Platforms Tested - - -Features Added since HDF5-1.2.1 -=============================== - * Added internal free-lists to reduce memory required by the library and - H5garbage_collect API function. - * h5dump displays opaque and bitfield types. - * New features added to snapshots. Use 'snapshot help' to see a - complete list of features. - * Improved configure to detect if MPIO routines are available when - parallel mode is requested. - -Bug Fixes since HDF5-1.2.1 -========================== - * h5dump correctly displays compound datatypes, including simple and - nested compound types. - * h5dump correctly displays the committed copy of predefined types. - * Corrected an error in h5toh4 which did not convert the 32-bit - int from HDF5 to HDF4 correctly for the T3E platform. - * Corrected a floating point number conversion error for the - Cray J90 platform. The error did not convert the value 0.0 - correctly. - * Fixed error in H5Giterate which was not updating the "index" parameter - correctly. - * Fixed error in hyperslab iteration which was not walking through the - correct sequence of array elements if hyperslabs were staggered in a - certain pattern. - * Fixed several other problems in hyperslab iteration code. - * Fixed another H5Giterate bug which caused groups with large numbers - of objects in them to misbehave when the callback function returned - non-zero values. - * Changed return type of H5Aiterate and H5A_operator_t typedef to be - herr_t, to align them with the dataset and group iterator functions. - * Changed H5Screate_simple and H5Sset_extent_simple to not allow dimensions - of size 0 without the same dimension being unlimited. - * Improved metadata hashing & caching algorithms to avoid - many hash flushes and also removed some redundant I/O when moving metadata - blocks in the file. - * The libhdf5.settings file shows the correct machine byte-sex. - * The "struct(opt)" type conversion function which gets invoked for - certain compound datatype conversions was fixed for nested compound - types. This required a small change in the datatype conversion - function API. - -Known Problems -============== - -o SunOS 5.6 with C WorkShop Compilers 4.2: hyperslab selections will - fail if library is compiled using optimization of any level. -o TFLOPS: dsets test fails if compiled with optimization turned on. -o J90: tools fail to dispay data for the datasets with a compound datatype. 
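   To illustrate the H5Screate_simple()/H5Sset_extent_simple() rule noted in
   the bug-fix list above, a dimension of size 0 is accepted only when the
   corresponding maximum dimension is unlimited; a minimal C fragment:

        hsize_t dims[1]    = {0};             /* current size may be zero...          */
        hsize_t maxdims[1] = {H5S_UNLIMITED}; /* ...because this dimension is unlimited */
        hid_t   space_id   = H5Screate_simple(1, dims, maxdims);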
- -Platforms Tested -================ - - AIX 4.3.3 (IBM SP) 3.6.6 | binaries - mpicc using mpich 1.1.2 | are not - mpicc_r using IBM MPI-IO prototype | available - AIX 4.3.2.0 (IBM SP) xlc 5.0.1.0 - Cray J90 10.0.0.7 cc 6.3.0.2 - Cray T3E 2.0.5.29 cc 6.3.0.2 - mpt.1.3 - FreeBSD 4.0 gcc 2.95.2 - HP-UX B.10.20 HP C HP92453-01 A.10.32 - HP-UX B.11.00 HP92453-01 A.11.00.13 HP C Compiler - (static library only, h5toh4 tool is not available) - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1m - mpt.1.4 - - Linux 2.2.10 SMP gcc 2.95.1 - mpicc(gcc-2.95.1) - gcc (egcs-2.91.66) - mpicc (egcs-2.91.66) - Linux 2.2.16 (RedHat 6.2) gcc 2.95.2 - - OSF1 V4.0 DEC-V5.2-040 - SunOS 5.6 cc WorkShop Compilers 5.0 no optimization - SunOS 5.7 cc WorkShop Compilers 5.0 - SolarisX86 SunOS 5.5.1 gcc version 2.7.2 with --disable-hsizet - TFLOPS 3.2.1 pgcc Rel 3.1-3i - mpich-1.1.2 with local changes - Windows NT4.0 sp5 MSVC++ 6.0 - Windows 98 MSVC++ 6.0 - Windows 2000 MSVC++ 6.0 - - - -%%%%1.2.1%%%% Release Information for hdf5-1.2.1 - -6. Release Information for hdf5-1.2.1 -================================================================ - - -Bug fixes since HDF5-1.2.0 -========================== - -Configuration -------------- - - * The hdf5.h include file was fixed to allow the HDF5 Library to be compiled - with other libraries/applications that use GNU autoconf. - * Configuration for parallel HDF5 was improved. Configure now attempts to - link with libmpi.a and/or libmpio.a as the MPI libraries by default. - It also uses "mpirun" to launch MPI tests by default. It tests to - link MPIO routines during the configuration stage, rather than failing - later as before. One can just do "./configure --enable-parallel" - if the MPI library is in the system library. - -Library -------- - - * Error was fixed which was not allowing dataset region references to have - their regions retrieved correctly. - * Added internal free-lists to reduce memory required by the library and - H5garbage_collect API function - * Fixed error in H5Giterate which was not updating the "index" parameter - correctly. - * Fixed error in hyperslab iteration which was not walking through the - correct sequence of array elements if hyperslabs were staggered in a - certain pattern - * Fixed several other problems in hyperslab iteration code. - -Tests ------- - - * Added additional tests for group and attribute iteration. - * Added additional test for staggered hyperslab iteration. - * Added additional test for random 5-D hyperslab selection. - -Tools ------- - - * Added an option, -V, to show the version information of h5dump. - * Fixed a core dumping bug of h5toh4 when executed on platforms like - TFLOPS. - * The test script for h5toh4 used to not able to detect the hdp - dumper command was not valid. It now detects and reports the - failure of hdp execution. - -Documentation -------------- - - * User's Guide and Reference Manual were updated. - See doc/html/PSandPDF/index.html for more details. - - -Platforms Tested: -================ -Note: Due to the nature of bug fixes, only static versions of the library and tools were tested. 
- - - AIX 4.3.2 (IBM SP) 3.6.6 - Cray T3E 2.0.4.81 cc 6.3.0.1 - mpt.1.3 - FreeBSD 3.3-STABLE gcc 2.95.2 - HP-UX B.10.20 HP C HP92453-01 A.10.32 - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1m - mpt.1.3 (SGI MPI 3.2.0.0) - - Linux 2.2.10 SuSE egcs-2.91.66 configured with - (i686-pc-linux-gnu) --disable-hsizet - mpich-1.2.0 egcs-2.91.66 19990314/Linux - - OSF1 V4.0 DEC-V5.2-040 - SunOS 5.6 cc WorkShop Compilers 4.2 no optimization - SunOS 5.7 cc WorkShop Compilers 5.0 - TFLOPS 2.8 cicc (pgcc Rel 3.0-5i) - mpich-1.1.2 with local changes - Windows NT4.0 sp5 MSVC++ 6.0 - -Known Problems: -============== - -o SunOS 5.6 with C WorkShop Compilers 4.2: Hyperslab selections will - fail if library is compiled using optimization of any level. - - - -%%%%1.2.0%%%% Release Information for hdf5-1.2.0 - -5. Release Information for hdf5-1.2.0 -=================================================================== - -A. Platforms Supported - ------------------- - -Operating systems listed below with compiler information and MPI library, if -applicable, are systems that HDF5 1.2.0 was tested on. - - Compiler & libraries - Platform Information Comment - -------- ---------- -------- - - AIX 4.3.2 (IBM SP) 3.6.6 - - Cray J90 10.0.0.6 cc 6.3.0.0 - - Cray T3E 2.0.4.61 cc 6.2.1.0 - mpt.1.3 - - FreeBSD 3.2 gcc 2.95.1 - - HP-UX B.10.20 HP C HP92453-01 A.10.32 - gcc 2.8.1 - - IRIX 6.5 MIPSpro cc 7.30 - - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1m - mpt.1.3 (SGI MPI 3.2.0.0) - - Linux 2.2.10 egcs-2.91.66 configured with - --disable-hsizet - lbraries: glibc2 - - OSF1 V4.0 DEC-V5.2-040 - - SunOS 5.6 cc WorkShop Compilers 4.2 - no optimization - gcc 2.8.1 - - SunOS 5.7 cc WorkShop Compilers 5.0 - gcc 2.8.1 - - TFLOPS 2.7.1 cicc (pgcc Rel 3.0-4i) - mpich-1.1.2 with local changes - - Windows NT4.0 intel MSVC++ 5.0 and 6.0 - - Windows NT alpha 4.0 MSVC++ 5.0 - - Windows 98 MSVC++ 5.0 - - -B. Known Problems - -------------- - -* NT alpha 4.0 - Dumper utiliy h5dump fails if linked with DLL. - -* SunOS 5.6 with C WorkShop Compilers 4.2 - Hyperslab selections will fail if library is compiled using optimization - of any level. - - -C. Changes Since Version 1.0.1 - --------------------------- - -1. Documentation - ------------- - -* More examples - -* Updated user guide, reference manual, and format specification. - -* Self-contained documentation for installations isolated from the - Internet. - -* HDF5 Tutorial was added to the documentation - -2. Configuration - ------------- - -* Better detection and support for MPI-IO. - -* Recognition of compilers with known code generation problems. - -* Support for various compilers on a single architecture (e.g., the - native compiler and the GNU compilers). - -* Ability to build from read-only media and with different compilers - and/or options concurrently. - -* Added a libhdf5.settings file which summarizes the configuration - information and is installed along with the library. - -* Builds a shared library on most systems that support it. - -* Support for Cray T3E, J90 and Windows/NT. - -3. Debugging - --------- - -* Improved control and redirection of debugging and tracing messages. - -4. Datatypes - --------- - -* Optimizations to compound datatype conversions and I/O operations. - -* Added nearly 100 optimized conversion functions for native datatypes - including support for non-aligned data. - -* Added support for bitfield, opaque, and enumeration types. 
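   For example, an enumeration datatype can be built with the
   H5Tenum_create()/H5Tenum_insert() calls listed under the new API functions
   below; the member names and values here are illustrative:

        hid_t etype = H5Tenum_create(H5T_NATIVE_INT);
        int   val;

        val = 0;  H5Tenum_insert(etype, "RED",   &val);
        val = 1;  H5Tenum_insert(etype, "GREEN", &val);
        val = 2;  H5Tenum_insert(etype, "BLUE",  &val);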
- -* Added distinctions between signed and unsigned char types to the - list of predefined native hdf5 datatypes. - -* Added HDF5 type definitions for C9x types like int32_t. - -* Application-defined type conversion functions can handle non-packed - data. - -* Changed the H5Tunregister() function to use wildcards when matching - conversion functions. H5Tregister_hard() and H5Tregister_soft() - were combined into H5Tregister(). - -* Support for variable-length datatypes (arrays of varying length per - dataset element). Variable length strings currently supported only - as variable length arrays of 1-byte integers. - -5. Dataspaces - ---------- - -* New query functions for selections. - -* I/O operations bypass the stripmining loop and go directly to - storage for certain contiguous selections in the absense of type - conversions. In other cases the stripmining buffers are used more - effectively. - -* Reduced the number of I/O requests under certain circumstances, - improving performance on systems with high I/O latency. - -6. Persistent Pointers - ------------------- - -* Object (serial and parallel) and dataset region (serial only) - references are implemented. - -7. Parallel Support - ---------------- - -* Improved parallel I/O performance. - -* Supported new platforms: Cray T3E, Linux, DEC Cluster. - -* Use vendor supported version of MPIO on SGI O2K and Cray platforms. - -* Improved the algorithm that translates an HDF5 hyperslab selection - into an MPI type for better collective I/O performance. - -8. New API functions - ----------------- - - a. Property List Interface: - ------------------------ - - H5Pset_xfer - set data transfer properties - H5Pset_preserve - set dataset transfer property list status - H5Pget_preserve - get dataset transfer property list status - H5Pset_hyper_cache - indicates whether to cache hyperslab blocks during I/O - H5Pget_hyper_cache - returns information regarding the caching of - hyperslab blocks during I/O - H5Pget_btree_ratios - sets B-tree split ratios for a dataset - transfer property list - H5Pset_btree_ratios - gets B-tree split ratios for a dataset - transfer property list - H5Pset_vlen_mem_manager - sets the memory manager for variable-length - datatype allocation - H5Pget_vlen_mem_manager - sets the memory manager for variable-length - datatype allocation - - b. Dataset Interface: - ------------------ - - H5Diterate - iterate over all selected elements in a dataspace - H5Dget_storage_size - return the amount of storage required for a dataset - H5Dvlen_reclaim - reclaim VL datatype memory buffers - - c. Dataspace Interface: - -------------------- - H5Sget_select_hyper_nblocks - get number of hyperslab blocks - H5Sget_select_hyper_blocklist - get the list of hyperslab blocks - currently selected - H5Sget_select_elem_npoints - get the number of element points - in the current selection - H5Sget_select_elem_pointlist - get the list of element points - currently selected - H5Sget_select_bounds - gets the bounding box containing - the current selection - - d. 
Datatype Interface: - ------------------- - H5Tget_super - return the base datatype from which a - datatype is derived - H5Tvlen_create - creates a new variable-length dataype - H5Tenum_create - creates a new enumeration datatype - H5Tenum_insert - inserts a new enumeration datatype member - H5Tenum_nameof - returns the symbol name corresponding to a - specified member of an enumeration datatype - H5Tvalueof - return the value corresponding to a - specified member of an enumeration datatype - H5Tget_member_value - return the value of an enumeration datatype member - H5Tset_tag - tags an opaque datatype - H5Tget_tag - gets the tag associated with an opaque datatype - - e. Identifier Interface: - --------------------- - H5Iget_type - retrieve the type of an object - - f. Reference Interface: - -------------------- - H5Rcreate - creates a reference - H5Rdereference - open the HDF5 object referenced - H5Rget_region - retrieve a dataspace with the specified region selected - H5Rget_object_type - retrieve the type of object that an - object reference points to - - g. Ragged Arrays (alpha) (names of those API functions were changed): - ------------------------------------------------------------------ - H5RAcreate - create a new ragged array (old name was H5Rcreate) - H5RAopen - open an existing array (old name was H5Ropen) - H5RAclose - close a ragged array (old name was H5Rclose) - H5RAwrite - write to an array (old name was H5Rwrite) - H5RAread - read from an array (old name was H5Rread) - - -9. Tools - ----- - -* Enhancements to the h5ls tool including the ability to list objects - from more than one file, to display raw hexadecimal data, to - show file addresses for raw data, to format output more reasonably, - to show object attributes, and to perform a recursive listing, - -* Enhancements to h5dump: support new data types added since previous - versions. - -* h5toh4: An hdf5 to hdf4 converter. - - - -%%%%1.0.1%%%% Release Information for hdf5-1.0.1 - -4. Changes from Release 1.0.0 to Release 1.0.1 -===================================================================== - -* [Improvement]: configure sets up the Makefile in the parallel tests - suit (testpar/) correctly. - -* [Bug-Fix]: Configure failed for all IRIX versions other than 6.3. - It now configures correctly for all IRIX 6.x version. - -* Released Parallel HDF5 - - Supported Features: - ------------------ - - HDF5 files are accessed according to the communicator and INFO - object defined in the property list set by H5Pset_mpi. - - Independent read and write accesses to fixed and extendable dimension - datasets. - - Collective read and write accesses to fixed dimension datasets. - - Supported Platforms: - ------------------- - - Intel Red - IBM SP2 - SGI Origin 2000 - - Changes In This Release: - ----------------------- - - o Support of Access to Extendable Dimension Datasets. - Extendable dimension datasets must use chunked storage methods. - A new function, H5Dextend, is created to extend the current - dimensions of a dataset. The current release requires the - MPI application must make a collective call to extend the - dimensions of an extendable dataset before writing to the - newly extended area. (The serial does not require the - call of H5Dextend. The dimensions of an extendable - dataset is increased when data is written to beyond the - current dimensions but within the maximum dimensions.) - The required collective call of H5Dextend may be relaxed - in future release. 
- - This release only support independent read and write accesses - to extendable datasets. Collective accesses to extendable - datasets will be implemented in future releases. - - o Collective access to fixed dimension datasets. - Collective access to a dataset can be specified in the transfer - property list argument in H5Dread and H5Dwrite. The current - release supports collective access to fixed dimension datasets. - Collective access to extendable datasets will be implemented in - future releases. - - o HDF5 files are opened according to Communicator and INFO object. - H5Dopen now records the communicator and INFO setup by H5Pset_mmpi - and pass them to the corresponding MPIO open file calls for - processing. - - o This release has been tested on IBM SP2, Intel Red and SGI Origin 2000 - systems. It uses the ROMIO version of MPIO interface for parallel - I/O supports. - - - -%%%%1.0.0%%%% Release Information for hdf5-1.0.0 - -3. Changes from the Beta 1.0.0 Release to Release 1.0.0 -==================================================================== - -* Added fill values for datasets. For contiguous datasets fill value - performance may be quite poor since the fill value is written to the - entire dataset when the dataset is created. This will be remedied - in a future version. Chunked datasets using fill values do not - incur any additional overhead. See H5Pset_fill_value(). - -* Multiple hdf5 files can be "mounted" on one another to create a - larger virtual file. See H5Fmount(). - -* Object names can be removed or changed but objects are never - actually removed from the file yet. See H5Gunlink() and H5Gmove(). - -* Added a tuning mechanism for B-trees to insure that sequential - writes to chunked datasets use less overhead. See H5Pset_btree_ratios(). - -* Various optimizations and bug fixes. - - - -%%%%1.0.0 Beta%%%% Release Information for hdf5-1.0.0 Beta - -2. Changes from the Second Alpha 1.0.0 Release to the Beta 1.0.0 Release -========================================================================= - -* Strided hyperslab selections in dataspaces now working. - -* The compression API has been replaced with a more general filter - API. See doc/html/Filters.html for details. - -* Alpha-quality 2d ragged arrays are implemented as a layer built on - top of other hdf5 objects. The API and storage format will almost - certainly change. - -* More debugging support including API tracing. See Debugging.html. - -* C and Fortran style 8-bit fixed-length character string types are - supported with space or null padding or null termination and - translations between them. - -* Added function H5Fflush() to write all cached data immediately to - the file. - -* Datasets maintain a modification time which can be retrieved with - H5Gstat(). - -* The h5ls tool can display much more information, including all the - values of a dataset. - - - -%%%%1.0.0 Alpha 2%%%% Release Information for hdf5-1.0.0 Alpha 2 - -1. Changes from the First Alpha 1.0.0 Release to - the Second Alpha 1.0.0 Release -===================================================================== - -* Two of the packages have been renamed. The data space API has been - renamed from `H5P' to `H5S' and the property list (template) API has - been renamed from `H5C' to `H5P'. - -* The new attribute API `H5A' has been added. An attribute is a small - dataset which can be attached to some other object (for instance, a - 4x4 transformation matrix attached to a 3-dimensional dataset, or an - English abstract attached to a group). 
- -* The error handling API `H5E' has been completed. By default, when an - API function returns failure an error stack is displayed on the - standard error stream. The H5Eset_auto() controls the automatic - printing and H5E_BEGIN_TRY/H5E_END_TRY macros can temporarily - disable the automatic error printing. - -* Support for large files and datasets (>2GB) has been added. There - is an html document that describes how it works. Some of the types - for function arguments have changed to support this: all arguments - pertaining to sizes of memory objects are `size_t' and all arguments - pertaining to file sizes are `hsize_t'. - -* More data type conversions have been added although none of them are - fine tuned for performance. There are new converters from integer - to integer and float to float, but not between integers and floating - points. A bug has been fixed in the converter between compound - types. - -* The numbered types have been removed from the API: int8, uint8, - int16, uint16, int32, uint32, int64, uint64, float32, and float64. - Use standard C types instead. Similarly, the numbered types were - removed from the H5T_NATIVE_* architecture; use unnumbered types - which correspond to the standard C types like H5T_NATIVE_INT. - -* More debugging support was added. If tracing is enabled at - configuration time (the default) and the HDF5_TRACE environment - variable is set to a file descriptor then all API calls will emit - the function name, argument names and values, and return value on - that file number. There is an html document that describes this. - If appropriate debugging options are enabled at configuration time, - some packages will display performance information on stderr. - -* Data types can be stored in the file as independent objects and - multiple datasets can share a data type. - -* The raw data I/O stream has been implemented and the application can - control meta and raw data caches, so I/O performance should be - improved from the first alpha release. - -* Group and attribute query functions have been implemented so it is - now possible to find out the contents of a file with no prior - knowledge. - -* External raw data storage allows datasets to be written by other - applications or I/O libraries and described and accessed through - HDF5. - -* Hard and soft (symbolic) links are implemented which allow groups to - share objects. Dangling and recursive symbolic links are supported. - -* User-defined data compression is implemented although we may - generalize the interface to allow arbitrary user-defined filters - which can be used for compression, checksums, encryption, - performance monitoring, etc. The publicly-available `deflate' - method is predefined if the GNU libz.a can be found at configuration - time. - -* The configuration scripts have been modified to make it easier to - build debugging vs. production versions of the library. - -* The library automatically checks that the application was compiled - with the correct version of header files. - - - Parallel HDF5 Changes - -* Parallel support for fixed dimension datasets with contiguous or - chunked storages. Also, support unlimited dimension datasets which - must use chunk storage. No parallel support for compressed datasets. - -* Collective data transfer for H5Dread/H5Dwrite. Collective access - support for datasets with contiguous storage only, thus only fixed - dimension datasets for now. - -* H5Pset_mpi and H5Pget_mpi no longer have the access_mode - argument. 
It is taken over by the data-transfer property list - of H5Dread/H5Dwrite. - -* New functions H5Pset_xfer and H5Pget_xfer to handle the - specification of independent or collective data transfer_mode - in the dataset transfer properties list. The properties - list can be used to specify data transfer mode in the H5Dwrite - and H5Dread function calls. - -* Added parallel support for datasets with chunked storage layout. - When a dataset is extend in a PHDF5 file, all processes that open - the file must collectively call H5Dextend with identical new dimension - sizes. - - - LIST OF API FUNCTIONS - -The following functions are implemented. Errors are returned if an -attempt is made to use some feature which is not implemented and -printing the error stack will show `not implemented yet'. - -Library - H5check - check that lib version matches header version - H5open - initialize library (happens automatically) - H5close - shut down the library (happens automatically) - H5dont_atexit - don't call H5close on exit - H5get_libversion - retrieve library version info - H5check_version - check for specific library version - -Property Lists - H5Pclose - release template resources - H5Pcopy - copy a template - H5Pcreate - create a new template - H5Pget_chunk - get chunked storage properties - H5Pset_chunk - set chunked storage properties - H5Pget_class - get template class - H5Pget_istore_k - get chunked storage properties - H5Pset_istore_k - set chunked storage properties - H5Pget_layout - get raw data layout class - H5Pset_layout - set raw data layout class - H5Pget_sizes - get address and size sizes - H5Pset_sizes - set address and size sizes - H5Pget_sym_k - get symbol table storage properties - H5Pset_sym_k - set symbol table storage properties - H5Pget_userblock - get user-block size - H5Pset_userblock - set user-block size - H5Pget_version - get file version numbers - H5Pget_alignment - get data alignment properties - H5Pset_alignment - set data alignment properties - H5Pget_external_count- get count of external data files - H5Pget_external - get information about an external data file - H5Pset_external - add a new external data file to the list - H5Pget_driver - get low-level file driver class - H5Pget_stdio - get properties for stdio low-level driver - H5Pset_stdio - set properties for stdio low-level driver - H5Pget_sec2 - get properties for sec2 low-level driver - H5Pset_sec2 - set properties for sec2 low-level driver - H5Pget_core - get properties for core low-level driver - H5Pset_core - set properties for core low-level driver - H5Pget_split - get properties for split low-level driver - H5Pset_split - set properties for split low-level driver - H5P_get_family - get properties for family low-level driver - H5P_set_family - set properties for family low-level driver - H5Pget_cache - get meta- and raw-data caching properties - H5Pset_cache - set meta- and raw-data caching properties - H5Pget_buffer - get raw-data I/O pipe buffer properties - H5Pset_buffer - set raw-data I/O pipe buffer properties - H5Pget_preserve - get type conversion preservation properties - H5Pset_preserve - set type conversion preservation properties - H5Pget_nfilters - get number of raw data filters - H5Pget_filter - get raw data filter properties - H5Pset_filter - set raw data filter properties - H5Pset_deflate - set deflate compression filter properties - H5Pget_mpi - get MPI-IO properties - H5Pset_mpi - set MPI-IO properties - H5Pget_xfer - get data transfer properties - + H5Pset_xfer - set data transfer properties - 
+ H5Pset_preserve - set dataset transfer property list status - + H5Pget_preserve - get dataset transfer property list status - + H5Pset_hyper_cache - indicates whether to cache hyperslab blocks during I/O - + H5Pget_hyper_cache - returns information regarding the caching of - hyperslab blocks during I/O - + H5Pget_btree_ratios - sets B-tree split ratios for a dataset - transfer property list - + H5Pset_btree_ratios - gets B-tree split ratios for a dataset - transfer property list - + H5Pset_vlen_mem_manager - sets the memory manager for variable-length - datatype allocation - + H5Pget_vlen_mem_manager - sets the memory manager for variable-length - datatype allocation - -Datasets - H5Dclose - release dataset resources - H5Dcreate - create a new dataset - H5Dget_space - get data space - H5Dget_type - get data type - H5Dget_create_plist - get dataset creation properties - H5Dopen - open an existing dataset - H5Dread - read raw data - H5Dwrite - write raw data - H5Dextend - extend a dataset - + H5Diterate - iterate over all selected elements in a dataspace - + H5Dget_storage_size - return the amount of storage required for a dataset - + H5Dvlen_reclaim - reclaim VL datatype memory buffers - -Attributes - H5Acreate - create a new attribute - H5Aopen_name - open an attribute by name - H5Aopen_idx - open an attribute by number - H5Awrite - write values into an attribute - H5Aread - read values from an attribute - H5Aget_space - get attribute data space - H5Aget_type - get attribute data type - H5Aget_name - get attribute name - H5Anum_attrs - return the number of attributes for an object - H5Aiterate - iterate over an object's attributes - H5Adelete - delete an attribute - H5Aclose - close an attribute - -Errors - H5Eclear - clear the error stack - H5Eprint - print an error stack - H5Eget_auto - get automatic error reporting settings - H5Eset_auto - set automatic error reporting - H5Ewalk - iterate over the error stack - H5Ewalk_cb - the default error stack iterator function - H5Eget_major - get the message for the major error number - H5Eget_minor - get the message for the minor error number - -Files - H5Fclose - close a file and release resources - H5Fcreate - create a new file - H5Fget_create_plist - get file creation property list - H5Fget_access_plist - get file access property list - H5Fis_hdf5 - determine if a file is an hdf5 file - H5Fopen - open an existing file - H5Freopen - reopen an HDF5 file - H5Fmount - mount a file - H5Funmount - unmount a file - H5Fflush - flush all buffers associated with a file to disk - -Groups - H5Gclose - close a group and release resources - H5Gcreate - create a new group - H5Gopen - open an existing group - H5Giterate - iterate over the contents of a group - H5Gmove - change the name of some object - H5Glink - create a hard or soft link to an object - H5Gunlink - break the link between a name and an object - H5Gget_objinfo - get information about a group entry - H5Gget_linkval - get the value of a soft link - H5Gget_comment - get the comment string for an object - H5Gset_comment - set the comment string for an object - -Dataspaces - H5Screate - create a new data space - H5Scopy - copy a data space - H5Sclose - release data space - H5Screate_simple - create a new simple data space - H5Sset_space - set simple data space extents - H5Sis_simple - determine if data space is simple - H5Sset_extent_simple - set simple data space dimensionality and size - H5Sget_simple_extent_npoints - get number of points in simple extent - H5Sget_simple_extent_ndims - get 
simple data space dimensionality - H5Sget_simple_extent_dims - get simple data space size - H5Sget_simple_extent_type - get type of simple extent - H5Sset_extent_none - reset extent to be empty - H5Sextent_copy - copy the extent from one data space to another - H5Sget_select_npoints - get number of points selected for I/O - H5Sselect_hyperslab - set hyperslab dataspace selection - H5Sselect_elements - set element sequence dataspace selection - H5Sselect_all - select entire extent for I/O - H5Sselect_none - deselect all elements of extent - H5Soffset_simple - set selection offset - H5Sselect_valid - determine if selection is valid for extent - + H5Sget_select_hyper_nblocks - get number of hyperslab blocks - + H5Sget_select_hyper_blocklist - get the list of hyperslab blocks - currently selected - + H5Sget_select_elem_npoints - get the number of element points - in the current selection - + H5Sget_select_elem_pointlist - get the list of element points - currently selected - + H5Sget_select_bounds - gets the bounding box containing - the current selection - -Datatypes - H5Tclose - release data type resources - H5Topen - open a named data type - H5Tcommit - name a data type - H5Tcommitted - determine if a type is named - H5Tcopy - copy a data type - H5Tcreate - create a new data type - H5Tequal - compare two data types - H5Tlock - lock type to prevent changes - H5Tfind - find a data type conversion function - H5Tconvert - convert data from one type to another - H5Tregister - register a conversion function - H5Tunregister - remove a conversion function - H5Tget_overflow - get function that handles overflow conv. cases - H5Tset_overflow - set function to handle overflow conversion cases - H5Tget_class - get data type class - H5Tget_cset - get character set - H5Tget_ebias - get exponent bias - H5Tget_fields - get floating point fields - H5Tget_inpad - get inter-field padding - H5Tget_member_dims - get struct member dimensions - H5Tget_member_name - get struct member name - H5Tget_member_offset - get struct member byte offset - H5Tget_member_type - get struct member type - H5Tget_nmembers - get number of struct members - H5Tget_norm - get floating point normalization - H5Tget_offset - get bit offset within type - H5Tget_order - get byte order - H5Tget_pad - get padding type - H5Tget_precision - get precision in bits - H5Tget_sign - get integer sign type - H5Tget_size - get size in bytes - H5Tget_strpad - get string padding - H5Tinsert - insert scalar struct member - H5Tinsert_array - insert array struct member - H5Tpack - pack struct members - H5Tset_cset - set character set - H5Tset_ebias - set exponent bias - H5Tset_fields - set floating point fields - H5Tset_inpad - set inter-field padding - H5Tset_norm - set floating point normalization - H5Tset_offset - set bit offset within type - H5Tset_order - set byte order - H5Tset_pad - set padding type - H5Tset_precision - set precision in bits - H5Tset_sign - set integer sign type - H5Tset_size - set size in bytes - H5Tset_strpad - set string padding - + H5Tget_super - return the base datatype from which a - datatype is derived - + H5Tvlen_create - creates a new variable-length dataype - + H5Tenum_create - creates a new enumeration datatype - + H5Tenum_insert - inserts a new enumeration datatype member - + H5Tenum_nameof - returns the symbol name corresponding to a - specified member of an enumeration datatype - + H5Tvalueof - return the value corresponding to a - specified member of an enumeration datatype - + H5Tget_member_value - return the value of 
an enumeration datatype member - + H5Tset_tag - tags an opaque datatype - + H5Tget_tag - gets the tag associated with an opaque datatype - - - H5Tregister_hard - register specific type conversion function - - H5Tregister_soft - register general type conversion function - -Filters - H5Tregister - register a conversion function - -Compression - H5Zregister - register new compression and uncompression - functions for a method specified by a method number - -Identifiers - + H5Iget_type - retrieve the type of an object - -References - + H5Rcreate - creates a reference - + H5Rdereference - open the HDF5 object referenced - + H5Rget_region - retrieve a dataspace with the specified region selected - + H5Rget_object_type - retrieve the type of object that an - object reference points to - -Ragged Arrays (alpha) - H5RAcreate - create a new ragged array - H5RAopen - open an existing array - H5RAclose - close a ragged array - H5RAwrite - write to an array - H5RAread - read from an array - - diff --git a/doc/html/ADGuide/ImageSpec.html b/doc/html/ADGuide/ImageSpec.html deleted file mode 100755 index f5bf217..0000000 --- a/doc/html/ADGuide/ImageSpec.html +++ /dev/null @@ -1,1279 +0,0 @@ - - - - - - Image Specification - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
-
-

-HDF5 Image and Palette Specification

- -
-

-Version 1.2

The HDF5 specification defines the standard objects and storage for HDF5 files. (For information about the HDF5 library, data model, and specification, see the HDF documentation.) This document is an additional specification that defines a standard profile for how to store image data in HDF5. Image data in HDF5 is stored as HDF5 datasets, with standard attributes that define the properties of the image.

This specification is primarily concerned with two dimensional raster -data similar to HDF4 Raster Images.  Specifications for storing other -types of imagery will be covered in other documents. -

This specification defines: -

    -
  • Standard storage and attributes for an Image dataset (Section 1)
  • Standard storage and attributes for Palettes (Section 2)
  • Standard for associating Palettes with Images (Section 3)
- -

-1. HDF5 Image Specification

- -

-1.1 Overview

-Image data is stored as an HDF5 dataset with values of HDF5 class Integer -or Float.  A common example would be a two dimensional dataset, with -elements of class Integer, e.g., a two dimensional array of unsigned 8 -bit integers.  However, this specification does not limit the dimensions -or number type that may be used for an Image. -

The dataset for an image is distinguished from other datasets by giving -it an attribute "CLASS=IMAGE".  In addition, the Image dataset may -have an optional attribute "PALETTE" that is an array of object references -for zero or more palettes. The Image dataset may have additional attributes -to describe the image data, as defined in Section 1.2. -

A Palette is an HDF5 dataset which contains color map information. A Palette dataset has an attribute "CLASS=PALETTE" and other attributes indicating the type and size of the palette, as defined in Section 2.1. A Palette is an independent object, which can be shared among several Image datasets.
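As an informal illustration of the CLASS convention described above, the following sketch shows how an application might test whether a dataset carries the attribute CLASS="IMAGE". It assumes the HDF5 1.6-era C API (two-argument H5Dopen, H5Aopen_name); the dataset name and the helper function are only illustrative, and the same pattern applies to CLASS="PALETTE".

    #include <hdf5.h>
    #include <string.h>

    /* Return 1 if the named dataset carries the attribute CLASS="IMAGE". */
    static int is_image_dataset(hid_t file, const char *name)
    {
        char  classname[64];          /* generous, zeroed buffer for the class string */
        hid_t dset, attr, memtype;
        int   result = 0;

        memset(classname, 0, sizeof(classname));
        dset = H5Dopen(file, name);               /* 1.6-era two-argument form */
        if (dset < 0)
            return 0;

        attr = H5Aopen_name(dset, "CLASS");
        if (attr >= 0) {
            /* Read the attribute as a fixed-size C string; the buffer was
             * zeroed above, so the result can be compared with strcmp(). */
            memtype = H5Tcopy(H5T_C_S1);
            H5Tset_size(memtype, sizeof(classname));
            if (H5Aread(attr, memtype, classname) >= 0 &&
                strcmp(classname, "IMAGE") == 0)
                result = 1;
            H5Tclose(memtype);
            H5Aclose(attr);
        }
        H5Dclose(dset);
        return result;
    }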

-1.2  Image Attributes

-The attributes for the Image are scalars unless otherwise noted.  -The length of String valued attributes should be at least the number of -characters. Optionally, String valued attributes may be stored in a String -longer than the minimum, in which case it must be zero terminated or null -padded.  "Required" attributes must always be used. "Optional" attributes -must be used when required. -
  -

-Attributes

- -
-
-Attribute name="CLASS" (Required)
- -
-This attribute is type H5T_C_S1, with size 5.
- -
-For all Images, the value of this attribute is "IMAGE".
- -
-
- -
-This attribute identifies this data set as intended to be interpreted as -an image that conforms to the specifications on this page.
-
- -
-Attribute name="PALETTE"
- -
-
An Image dataset within an HDF5 file may optionally specify an array of palettes with which it can be viewed. The dataset will have an attribute called "PALETTE" which contains a one-dimensional array of object reference pointers (HDF5 datatype H5T_STD_REF_OBJ) that refer to palettes in the file. The palette datasets must conform to the Palette specification in Section 2 below. The first palette in this array is the default palette with which the data may be viewed.
-
- -
-
-
- -
-Attribute name="IMAGE_SUBCLASS"
- -
-If present, the value of this attribute indicates the type of Palette that -should be used with the Image.  This attribute is a scalar of type -H5T_C_S1, with size according to the string plus one.  The values -are:
- -
-
-"IMAGE_GRAYSCALE" (length 15)
- -
-A grayscale image
- -
-"IMAGE_BITMAP" (length 12)
- -
-A bit map image
- -
-"IMAGE_TRUECOLOR" (length 15)
- -
-A truecolor image
- -
-"IMAGE_INDEXED" (length 13)
- -
-An indexed image
- -
-
-
- -
-Attribute name="INTERLACE_MODE"
- -
-For images with more than one component for each pixel, this optional attribute -specifies the layout of the data. The values are type H5T_C_S1 of length -15. See section 1.3 for information about the -storage layout for data.
- -
-"INTERLACE_PIXEL" (default): the component value for a pixel are contiguous.
- -
-"INTERLACE_PLANE": each component is stored as a plane.
- -
-
- -
-Attribute name="DISPLAY_ORIGIN"
- -
-This optional attribute indicates the intended orientation of the data -on a two-dimensional raster display.  The value indicates which corner -the pixel at (0, 0) should be viewed.  The values are type H5T_C_S1 -of length 2. If DISPLAY_ORIGIN is not set, the orientation is undefined.
- -
-"UL": (0,0) is at the upper left.
- -
-"LL": (0,0) is at the lower left.
- -
-"UR": (0,0) is at the upper right.
- -
-"LR": (0,0) is at the lower right.
-
- -
-Attribute name="IMAGE_WHITE_IS_ZERO"
- -
-
-This attribute is of type H5T_NATIVE_UCHAR.  0 = false, 1 = true .  -This is used for images with IMAGE_SUBCLASS="IMAGE_GRAYSCALE" or "IMAGE_BITMAP".
-
- -
-
-Attribute name="IMAGE_MINMAXRANGE"
- -
-If present, this attribute is an array of two numbers, of the same HDF5 -datatype as the data.  The first element is the minimum value of the -data, and the second is the maximum.  This is used for images with -IMAGE_SUBCLASS="IMAGE_GRAYSCALE", "IMAGE_BITMAP" or "IMAGE_INDEXED".
-
- -
-Attribute name="IMAGE_BACKGROUNDINDEX"
- -
-
-If set, this attribute indicates the index value that should be interpreted -as the "background color".  This attribute is HDF5 type H5T_NATIVE_UINT.
-
- -
-Attribute name="IMAGE_TRANSPARENCY"
- -
-
If set, this attribute indicates the index value that should be interpreted as the "transparent color". This attribute is HDF5 type H5T_NATIVE_UINT. This attribute may not be used for IMAGE_SUBCLASS="IMAGE_TRUECOLOR".
-
- -
-Attribute name="IMAGE_ASPECTRATIO"
- -
-
-If set, this attribute indicates the aspect ratio.
-
- -
-Attribute name="IMAGE_COLORMODEL"
- -
-
-If set, this attribute indicates the color model of Palette that should -be used with the Image.  This attribute is of type H5T_C_S1, with -size 3, 4, or 5.  The value is one of the color models described in -the Palette specification in section 2.2 below.  -This attribute may be used only for IMAGE_SUBCLASS="IMAGE_TRUECOLOR" or -"IMAGE_INDEXED".
-
- -
-Attribute name="IMAGE_GAMMACORRECTION"
- -
-
-If set, this attribute gives the Gamma correction.  The attribute -is type H5T_NATIVE_FLOAT.  This attribute may be used only for IMAGE_SUBCLASS="IMAGE_TRUECOLOR" -or "IMAGE_INDEXED".
-
-Attribute name="IMAGE_VERSION" (Required) -
-
-This attribute is of type H5T_C_S1, with size corresponding to the length -of the version string.  This attribute identifies the version number -of this specification to which it conforms.  The current version number -is "1.2".
- -
  -

  -
  -
  -

Table 1. Attributes of an Image Dataset

    Attribute Name           R/O      Type                       String Size      Value
    -----------------------  -------  -------------------------  ---------------  -----------------------------------------
    CLASS                    R        String                     5                "IMAGE"
    PALETTE                  O        Array of Object                             <references to Palette datasets> [1]
                                      References
    IMAGE_SUBCLASS           O [2]    String                     15, 12, 15, 13   "IMAGE_GRAYSCALE", "IMAGE_BITMAP",
                                                                                  "IMAGE_TRUECOLOR", "IMAGE_INDEXED"
    INTERLACE_MODE           O [3,6]  String                     15               The layout of components if more than
                                                                                  one component per pixel.
    DISPLAY_ORIGIN           O        String                     2                If set, indicates the intended location
                                                                                  of the pixel (0,0).
    IMAGE_WHITE_IS_ZERO      O [3,4]  Unsigned Integer                            0 = false, 1 = true
    IMAGE_MINMAXRANGE        O [3,5]  Array [2] of <same                          The (<minimum>, <maximum>) value of
                                      datatype as data values>                    the data.
    IMAGE_BACKGROUNDINDEX    O [3]    Unsigned Integer                            The index of the background color.
    IMAGE_TRANSPARENCY       O [3,5]  Unsigned Integer                            The index of the transparent color.
    IMAGE_ASPECTRATIO        O [3,4]  Unsigned Integer                            The aspect ratio.
    IMAGE_COLORMODEL         O [3,6]  String                     3, 4, or 5       The color model, as defined below in the
                                                                                  Palette specification for attribute
                                                                                  PAL_COLORMODEL.
    IMAGE_GAMMACORRECTION    O [3,6]  Float                                       The gamma correction.
    IMAGE_VERSION            R        String                     3                "1.2"

    (R = Required, O = Optional; bracketed numbers refer to the notes below.)
- -
1. The first element of the array is the default Palette.
2. This attribute is required for images that use one of the standard color map types listed.
3. This attribute is required if set for the source image, in the case that the image is translated from another file into HDF5.
4. This applies to: IMAGE_SUBCLASS="IMAGE_GRAYSCALE" or "IMAGE_BITMAP".
5. This applies to: IMAGE_SUBCLASS="IMAGE_GRAYSCALE", "IMAGE_BITMAP", or "IMAGE_INDEXED".
6. This applies to: IMAGE_SUBCLASS="IMAGE_TRUECOLOR" or "IMAGE_INDEXED".
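To make Table 1 concrete, here is a minimal sketch of creating an 8-bit grayscale image dataset and attaching its required attributes, assuming the HDF5 1.6-era C API (five-argument H5Dcreate and H5Acreate). The file name, dataset name, dimensions, and the set_string_attr helper are illustrative only; string attributes are stored null-terminated, which this specification permits.

    #include <hdf5.h>
    #include <string.h>

    /* Attach a small fixed-length string attribute (stored null-terminated). */
    static void set_string_attr(hid_t dset, const char *name, const char *value)
    {
        hid_t space = H5Screate(H5S_SCALAR);
        hid_t type  = H5Tcopy(H5T_C_S1);
        hid_t attr;

        H5Tset_size(type, strlen(value) + 1);            /* room for the '\0' */
        attr = H5Acreate(dset, name, type, space, H5P_DEFAULT);
        H5Awrite(attr, type, value);

        H5Aclose(attr);
        H5Tclose(type);
        H5Sclose(space);
    }

    int main(void)
    {
        static unsigned char pixels[512][640];           /* illustrative data  */
        hsize_t              dims[2] = {512, 640};       /* [height][width]    */
        unsigned char        white_is_zero = 0;          /* 0 = false          */
        hid_t                file, space, dset, aspace, attr;

        file  = H5Fcreate("image.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        space = H5Screate_simple(2, dims, NULL);
        dset  = H5Dcreate(file, "photo", H5T_NATIVE_UCHAR, space, H5P_DEFAULT);
        H5Dwrite(dset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, pixels);

        /* Required attributes, plus the subclass used for grayscale data.     */
        set_string_attr(dset, "CLASS", "IMAGE");
        set_string_attr(dset, "IMAGE_VERSION", "1.2");
        set_string_attr(dset, "IMAGE_SUBCLASS", "IMAGE_GRAYSCALE");

        /* IMAGE_WHITE_IS_ZERO is required for the grayscale subclass.         */
        aspace = H5Screate(H5S_SCALAR);
        attr   = H5Acreate(dset, "IMAGE_WHITE_IS_ZERO", H5T_NATIVE_UCHAR,
                           aspace, H5P_DEFAULT);
        H5Awrite(attr, H5T_NATIVE_UCHAR, &white_is_zero);
        H5Aclose(attr);
        H5Sclose(aspace);

        H5Dclose(dset);
        H5Sclose(space);
        H5Fclose(file);
        return 0;
    }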
-
Table 2 summarizes the standard attributes for Image datasets using the common sub-classes. R means that the attribute listed in the leftmost column is Required for the image subclass in the first row, O means that the attribute is Optional for that subclass, and N means that the attribute cannot be applied to that subclass. The first two rows show the only attributes required for all subclasses.
  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 2a. Applicability of Attributes to IMAGE sub-classes

    IMAGE_SUBCLASS           IMAGE_GRAYSCALE   IMAGE_BITMAP
    -----------------------  ----------------  -------------
    CLASS                    R                 R
    IMAGE_VERSION            R                 R
    INTERLACE_MODE           N                 N
    IMAGE_WHITE_IS_ZERO      R                 R
    IMAGE_MINMAXRANGE        O                 O
    IMAGE_BACKGROUNDINDEX    O                 O
    IMAGE_TRANSPARENCY       O                 O
    IMAGE_ASPECTRATIO        O                 O
    IMAGE_COLORMODEL         N                 N
    IMAGE_GAMMACORRECTION    N                 N
    PALETTE                  O                 O
    DISPLAY_ORIGIN           O                 O
- -
 
Table 2b. Applicability of Attributes to IMAGE sub-classes

    IMAGE_SUBCLASS           IMAGE_TRUECOLOR   IMAGE_INDEXED
    -----------------------  ----------------  --------------
    CLASS                    R                 R
    IMAGE_VERSION            R                 R
    INTERLACE_MODE           R                 N
    IMAGE_WHITE_IS_ZERO      N                 N
    IMAGE_MINMAXRANGE        N                 O
    IMAGE_BACKGROUNDINDEX    N                 O
    IMAGE_TRANSPARENCY       N                 O
    IMAGE_ASPECTRATIO        O                 O
    IMAGE_COLORMODEL         O                 O
    IMAGE_GAMMACORRECTION    O                 O
    PALETTE                  O                 O
    DISPLAY_ORIGIN           O                 O
- -

-1.3 Storage Layout and Properties for Images

-In the case of an image with more than one component per pixel (e.g., Red, -Green, and Blue), the data may be arranged in one of two ways.  Following -HDF4 terminology, the data may be interlaced by pixel or by plane, which -should be indicated by the INTERLACE_MODE  attribute.  In both -cases, the dataset will have a dataspace with three dimensions, height, -width, and components.  The interlace modes specify different orders -for the dimensions. -
  - - - - - - - - - - - - - - - - - - - - -
Table 3. Storage of multiple component image data.

    Interlace Mode      Dimensions in the Dataspace
    ------------------  ----------------------------------
    INTERLACE_PIXEL     [height][width][pixel components]
    INTERLACE_PLANE     [pixel components][height][width]
- -

For example, consider a 5 (rows) by 10 (column) image, with Red, Green, -and Blue components.  Each component is an unsigned byte. In HDF5, -the datatype would be declared as an unsigned 8 bit integer.  For -pixel interlace, the dataspace would be a three dimensional array, with -dimensions: [10][5][3].  For plane interleave, the dataspace would -be three dimensions: [3][10][5]. -

In the case of images with only one component, the dataspace may be -either a two dimensional array, or a three dimensional array with the third -dimension of size 1.  For example, a 5 by 10 image with 8 bit color -indexes would be an HDF5 dataset with type unsigned 8 bit integer.  -The dataspace could be either a two dimensional array, with dimensions -[10][5], or three dimensions, with dimensions either [10][5][1] or [1][10][5]. -

Image datasets may be stored with any chunking or compression properties -supported by HDF5. -

A note concerning compatibility with the HDF4 GR interface: An Image dataset is stored as an HDF5 dataset. It is important to note that the order of the dimensions is the same as for any other HDF5 dataset. For a two dimensional image that is to be stored as a series of horizontal scan lines, with the scan lines contiguous (i.e., the fastest changing dimension is 'width'), the image will have a dataspace with dim[0] = height and dim[1] = width. This is completely consistent with all other HDF5 datasets.

Users familiar with HDF4 should be cautioned that this is not the -same as HDF4, and specifically is not consistent with what the HDF4 -GR interface does. -
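As a sketch of the layout rules above, the fragment below creates a pixel-interlaced truecolor dataset whose dataspace is ordered [height][width][pixel components], stored with ordinary HDF5 chunking and deflate compression as this specification permits. It assumes the 1.6-era five-argument H5Dcreate and an already-open file identifier; the dataset name, dimensions, chunk shape, and compression level are illustrative, and the caller would still attach the CLASS, IMAGE_VERSION, IMAGE_SUBCLASS, and INTERLACE_MODE attributes.

    #include <hdf5.h>

    /* Create a chunked, deflate-compressed truecolor image dataset laid out
     * as [height][width][pixel components] (INTERLACE_PIXEL ordering).      */
    hid_t create_truecolor_dataset(hid_t file)
    {
        hsize_t dims[3]  = {480, 640, 3};   /* [height][width][components]   */
        hsize_t chunk[3] = {60, 80, 3};     /* any legal chunk shape works   */
        hid_t   space, dcpl, dset;

        space = H5Screate_simple(3, dims, NULL);
        dcpl  = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 3, chunk);       /* chunked layout ...            */
        H5Pset_deflate(dcpl, 6);            /* ... with deflate level 6      */

        /* 1.6-era five-argument form; the creation property list carries
         * the chunking and filter setup.                                    */
        dset = H5Dcreate(file, "truecolor", H5T_NATIVE_UCHAR, space, dcpl);

        H5Pclose(dcpl);
        H5Sclose(space);
        return dset;    /* caller attaches the image attributes and closes it */
    }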
  -

-2.  HDF5 Palette Specification

- -

-2.1 Overview

-A palette is the means by which color is applied to an image and is also -referred to as a color lookup table. It is a table in which every row contains -the numerical representation of a particular color. In the example of an -8 bit standard RGB color model palette, this numerical representation of -a color is presented as a triplet specifying the intensity of red, green, -and blue components that make up each color. -
-

- -

In this example, the color component numeric type is an 8 bit unsigned -integer. While this is most common and recommended for general use, other -component color numeric datatypes, such as a 16 bit unsigned integer , -may be used. This type is specified as the type attribute of the palette -dataset. (see H5Tget_type(), H5Tset_type()) -

The minimum and maximum values of the component color numeric are specified as attributes of the palette dataset (see the PAL_MINMAXNUMERIC attribute below). If these attributes do not exist, it is assumed that the range of values will fill the space of the color numeric type; i.e., with an 8 bit unsigned integer, the valid range would be 0 to 255 for each color component.
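A sketch of writing such a two-element range attribute follows, assuming palette components of HDF5 type H5T_NATIVE_FLOAT and the 1.6-era five-argument H5Acreate; the helper name is illustrative. The same pattern applies to the IMAGE_MINMAXRANGE attribute of an Image dataset.

    #include <hdf5.h>

    /* Attach a two-element range attribute of the same datatype as the data;
     * here the color components are assumed to be H5T_NATIVE_FLOAT.         */
    void set_minmax(hid_t pal_dset, float minval, float maxval)
    {
        float   range[2];
        hsize_t dims[1] = {2};
        hid_t   space, attr;

        range[0] = minval;                  /* first element: minimum  */
        range[1] = maxval;                  /* second element: maximum */

        space = H5Screate_simple(1, dims, NULL);
        attr  = H5Acreate(pal_dset, "PAL_MINMAXNUMERIC",
                          H5T_NATIVE_FLOAT, space, H5P_DEFAULT);
        H5Awrite(attr, H5T_NATIVE_FLOAT, range);

        H5Aclose(attr);
        H5Sclose(space);
    }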

The HDF5 palette specification additionally allows for color models -beyond RGB. YUV, HSV, CMY, CMYK, YCbCr color models are supported, and -may be specified as a color model attribute of the palette dataset. (see -"Palette Attributes" for details). -

In HDF 4 and earlier, palettes were limited to 256 colors. The HDF5 -palette specification allows for palettes of varying length. The length -is specified as the number of rows of the palette dataset. -
  -
  - - - - -
Important Note: The specification of the Indexed -Palette will change substantially in the next version.  The Palette -described here is denigrated and is not supported.
- -
  - - - - -
Denigrated -

In a standard palette, the color entries are indexed directly. HDF5 -supports the notion of a range index table. Such a table defines an ascending -ordered list of ranges that map dataset values to the palette. If a range -index table exists for the palette, the PAL_TYPE attribute will be set -to "RANGEINDEX", and the PAL_RANGEINDEX attribute will contain an object -reference to a range index table array. If not, the PAL_TYPE attribute -either does not exist, or will be set to "STANDARD". -

The range index table array consists of a one dimensional array with -the same length as the palette dataset - 1. Ideally, the range index would -be of the same type as the dataset it refers to, however this is not a -requirement. -

Example 2: A range index array of type floating point -

-

- -

The range index array attribute defines the "to" of the range. Notice that the range index array attribute is one entry smaller than the palette. The first entry of 0.1259 specifies that all values below and up to 0.1259 inclusive will map to the first palette entry. The second entry signifies that all values greater than 0.1259 up to 0.3278 inclusive will map to the second palette entry, etc. All values greater than the last range index array entry (100000) map to the last entry in the palette.

- -

-2.2. Palette Attributes

-A palette exists in an HDF file as an independent data set with accompanying -attributes.  The Palette attributes are scalars except where noted -otherwise.  String values should have size the length of the string -value plus one.  "Required" attributes must be used.  "Optional" -attributes must be used when required. -

These attributes are defined as follows: -

-
-Attribute name="CLASS" (Required)
- -
-This attribute is of type H5T_C_S1, with size 7.
- -
-For all palettes, the value of this attribute is "PALETTE". This attribute -identifies this palette data set as a palette that conforms to the specifications -on this page.
- -
-Attribute name="PAL_COLORMODEL" (Required)
- -
-This attribute is of type H5T_C_S1, with size 3, 4, or 5.
- -
-Possible values for this are "RGB", "YUV", "CMY", "CMYK", "YCbCr", "HSV".
- -
-This defines the color model that the entries in the palette data set represent.
- -
-
-"RGB"
- -
-Each color index contains a triplet where the the first value defines the -red component, second defines the green component, and the third the blue -component.
- -
-"CMY"
- -
-Each color index contains a triplet where the the first value defines the -cyan component, second defines the magenta component, and the third the -yellow component.
- -
-"CMYK"
- -
-Each color index contains a quadruplet where the the first value defines -the cyan component, second defines the magenta component, the third the -yellow component, and the forth the black component.
- -
-"YCbCr"
- -
-Class Y encoding model. Each color index contains a triplet where the the -first value defines the luminance, second defines the Cb Chromonance, and -the third the Cr Chromonance.
- -
-"YUV"
- -
-Composite encoding color model. Each color index contains a triplet where -the the first value defines the luminance component, second defines the -chromonance component, and the third the value component.
- -
-"HSV"
- -
-Each color index contains a triplet where the the first value defines the -hue component, second defines the saturation component, and the third the -value component. The hue component defines the hue spectrum with a low -value representing magenta/red progressing to a high value which would -represent blue/magenta, passing through yellow, green, cyan. A low value -for the saturation component means less color saturation than a high value. -A low value for value will be darker than a high value.
- -
-
-
- -
-Attribute name="PAL_TYPE" (Required)
- -
-This attribute is of type H5T_C_S1, with size 9 or 10.
- -
-The current supported values for this attribute are : "STANDARD8" or "RANGEINDEX"
- -
-A PAL_TYPE of "STANDARD8" defines a palette dataset such that the first -entry defines index 0, the second entry defines index 1, etc. up until -the length of the palette - 1. This assumes an image dataset with direct -indexes into the palette.
-
- -
  - - - - -
Denigrated -

If the PAL_TYPE is set to "RANGEINDEX", there will be an additional -attribute with a name of "PAL_RANGEINDEX",  (See example 2 -for more details)

- - - - - -
-
-Attribute name="PAL_RANGEINDEX"   (Denigrated)
- -
-
-The PAL_RANGEINDEX attribute contains an HDF object reference (HDF5 -datatype H5T_STD_REF_OBJ) pointer which specifies a range index array in -the file to be used for color lookups for the palette.  (Only for -PAL_TYPE="RANGEINDEX")
-
-
- -
-Attribute name="PAL_MINMAXNUMERIC"
- -
-
-If present, this attribute is an array of two numbers, of the same HDF5 -datatype as the palette elements or color numerics.
- -
They specify the minimum and maximum values of the color numeric components. -For example, if the palette was an RGB of type Float, the color numeric -range for Red, Green, and Blue could be set to be between 0.0 and 1.0. -The intensity of the color guns would then be scaled accordingly to be -between this minimum and maximum attribute.
-Attribute name="PAL_VERSION"  (Required) -
This attribute is of type H5T_C_S1, with size corresponding to the -length of the version string.  This attribute identifies the version -number of this specification to which it conforms.  The current version -is "1.2".
- -
Table 4. Attributes of a Palette Dataset

    Attribute Name       R/O   Type                     String Size   Value
    -------------------  ----  -----------------------  ------------  -------------------------------------------
    CLASS                R     String                   7             "PALETTE"
    PAL_COLORMODEL       R     String                   3, 4, or 5    Color Model: "RGB", "YUV", "CMY", "CMYK",
                                                                      "YCbCr", or "HSV"
    PAL_TYPE             R     String                   9 or 10       "STANDARD8" or "RANGEINDEX" (Denigrated)
    RANGE_INDEX                Object Reference                       <Object Reference to Dataset of range
    (Denigrated)                                                      index values> [1]
    PAL_MINMAXNUMERIC    O     Array[2] of <same                      The first value is the <Minimum value for
                               datatype as palette>                   color values>, the second value is the
                                                                      <Maximum value for color values> [2]
    PAL_VERSION          R     String                   4             "1.2"

    (R = Required, O = Optional; bracketed numbers refer to the notes below.)
- -
  - - - - -
1. The RANGE_INDEX attribute is required if the PAL_TYPE is "RANGEINDEX". Otherwise, the RANGE_INDEX attribute should be omitted. (Range index is denigrated.)
2. The minimum and maximum are optional. If not set, the range is assumed to be the maximum range of the number type. If one of these attributes is set, then both should be set. The value of the minimum must be less than or equal to the value of the maximum.
-
Table 5 summarizes the uses of the standard attributes for a palette dataset. R means that the attribute listed in the leftmost column is Required for the palette type in the first row, O means that the attribute is Optional for that type, and N means that the attribute cannot be applied to that type. The first four rows show the attributes that are always required for the two palette types.
  -
  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 5. Applicability of Attributes

    PAL_TYPE             STANDARD8   RANGEINDEX
    -------------------  ----------  -----------
    CLASS                R           R
    PAL_VERSION          R           R
    PAL_COLORMODEL       R           R
    RANGE_INDEX          N           R
    PAL_MINMAXNUMERIC    O           O
- -

-2.3. Storage Layout for Palettes

The values of the Palette are stored as a dataset. The datatype can be any HDF5 atomic numeric type. The dataset will have dimensions (nentries by ncomponents), where 'nentries' is the number of colors (usually 256) and 'ncomponents' is the number of values per color (3 for RGB, 4 for CMYK, etc.).
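A sketch of writing a conforming 256-entry standard RGB palette with the required attributes from Table 4 follows, again using the 1.6-era C API (five-argument H5Dcreate and H5Acreate). The dataset name, the zero-filled illustrative color table, and the loop over the attribute list show only one possible implementation.

    #include <hdf5.h>
    #include <string.h>

    /* Write a 256-entry standard RGB palette with the attributes of Table 4. */
    hid_t write_palette(hid_t file)
    {
        static unsigned char colors[256][3];        /* illustrative color table */
        hsize_t dims[2] = {256, 3};                 /* nentries x ncomponents   */
        hid_t   space   = H5Screate_simple(2, dims, NULL);
        hid_t   pal     = H5Dcreate(file, "palette", H5T_NATIVE_UCHAR, space,
                                    H5P_DEFAULT);   /* 1.6-era form             */
        static const struct { const char *name, *value; } attrs[] = {
            { "CLASS",          "PALETTE"   },
            { "PAL_VERSION",    "1.2"       },
            { "PAL_COLORMODEL", "RGB"       },
            { "PAL_TYPE",       "STANDARD8" },
        };
        size_t i;

        H5Dwrite(pal, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, colors);

        /* Attach each required string attribute, stored null-terminated. */
        for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
            hid_t aspace = H5Screate(H5S_SCALAR);
            hid_t atype  = H5Tcopy(H5T_C_S1);
            hid_t attr;

            H5Tset_size(atype, strlen(attrs[i].value) + 1);
            attr = H5Acreate(pal, attrs[i].name, atype, aspace, H5P_DEFAULT);
            H5Awrite(attr, atype, attrs[i].value);

            H5Aclose(attr);
            H5Tclose(atype);
            H5Sclose(aspace);
        }

        H5Sclose(space);
        return pal;        /* caller closes the dataset when done */
    }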
  -

-3.  Consistency and Correlation of Image and Palette -Attributes

-The objects in this specification are an extension to the base HDF5 specification -and library.  They are accessible with the standard HDF5 library, -but the semantics of the objects are not enforced by the base library.  -For example, it is perfectly possible to add an attribute called IMAGE -to any dataset, or to include an object reference to any -HDF5 dataset in a PALETTE attribute.  This would be a valid -HDF5 file, but not conformant to this specification.  The rules defined -in this specification must be implemented with appropriate software, and -applications must use conforming software to assure correctness. -

The Image and Palette specifications include several redundant standard -attributes, such as the IMAGE_COLORMODEL and the PAL_COLORMODEL.  -These attributes are informative not normative, in that it is acceptable -to attach a Palette to an Image dataset even if their attributes do not -match.  Software is not required to enforce consistency, and files -may contain mismatched associations of Images and Palettes.  In all -cases, it is up to applications to determine what kinds of images and color -models can be supported. -

For example, an Image that was created from a file with an "RGB" palette may have a "YUV" Palette in its PALETTE attribute array. This would be a legal HDF5 file that also conforms to this specification, although it may or may not be correct for a given application.
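For conforming software, the association between an Image and its Palettes is carried entirely by the PALETTE attribute. The following sketch attaches a one-element array of object references to an image dataset so that the referenced dataset becomes its default palette; it assumes the 1.6-era H5Acreate signature and that both objects already exist in the file, and the path and variable names are illustrative.

    #include <hdf5.h>

    /* Point the image at its default palette via the PALETTE attribute. */
    void attach_palette(hid_t file, hid_t image_dset, const char *palette_path)
    {
        hobj_ref_t ref;                       /* object reference to the palette */
        hsize_t    dims[1] = {1};             /* one entry: the default palette  */
        hid_t      space, attr;

        /* For object references the dataspace argument of H5Rcreate is
         * ignored and is passed as -1.                                     */
        H5Rcreate(&ref, file, palette_path, H5R_OBJECT, -1);

        space = H5Screate_simple(1, dims, NULL);
        attr  = H5Acreate(image_dset, "PALETTE", H5T_STD_REF_OBJ, space,
                          H5P_DEFAULT);       /* 1.6-era form                    */
        H5Awrite(attr, H5T_STD_REF_OBJ, &ref);

        H5Aclose(attr);
        H5Sclose(space);
    }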

-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- -Last modified: 8 June 2005 - - - - diff --git a/doc/html/ADGuide/Makefile.am b/doc/html/ADGuide/Makefile.am deleted file mode 100644 index fde4097..0000000 --- a/doc/html/ADGuide/Makefile.am +++ /dev/null @@ -1,18 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir = $(docdir)/hdf5/ADGuide - -# Public doc files (to be installed)... -localdoc_DATA=Changes.html H4toH5Mapping.pdf HISTORY.txt ImageSpec.html \ - PaletteExample1.gif Palettes.fm.anc.gif RELEASE.txt diff --git a/doc/html/ADGuide/Makefile.in b/doc/html/ADGuide/Makefile.in deleted file mode 100644 index 81d0f44..0000000 --- a/doc/html/ADGuide/Makefile.in +++ /dev/null @@ -1,487 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/ADGuide -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/ADGuide - -# Public doc files (to be installed)... 
-localdoc_DATA = Changes.html H4toH5Mapping.pdf HISTORY.txt ImageSpec.html \ - PaletteExample1.gif Palettes.fm.anc.gif RELEASE.txt - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/ADGuide/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/ADGuide/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: 
all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/ADGuide/PaletteExample1.gif b/doc/html/ADGuide/PaletteExample1.gif deleted file mode 100755 index 8694d9d..0000000 Binary files a/doc/html/ADGuide/PaletteExample1.gif and /dev/null differ diff --git a/doc/html/ADGuide/Palettes.fm.anc.gif b/doc/html/ADGuide/Palettes.fm.anc.gif deleted file mode 100755 index d344c03..0000000 Binary files a/doc/html/ADGuide/Palettes.fm.anc.gif and /dev/null differ diff --git a/doc/html/ADGuide/RELEASE.txt b/doc/html/ADGuide/RELEASE.txt deleted file mode 100644 index 0e58c12..0000000 --- a/doc/html/ADGuide/RELEASE.txt +++ /dev/null @@ -1,906 +0,0 @@ -HDF5 version 1.7.48 released on Mon Jul 18 16:18:26 CDT 2005 -================================================================================ - - -INTRODUCTION - -This document describes the differences between HDF5-1.6.* and -HDF5-1.7.*, and contains information on the platforms tested and -known problems in HDF5-1.7.*. For more details check the HISTORY.txt -file in the HDF5 source. 
- -The HDF5 documentation can be found on the NCSA ftp server -(ftp.ncsa.uiuc.edu) in the directory: - - /HDF/HDF5/docs/ - -For more information look at the HDF5 home page at: - - http://hdf.ncsa.uiuc.edu/HDF5/ - -If you have any questions or comments, please send them to: - - hdfhelp@ncsa.uiuc.edu - -CONTENTS - -- New Features -- Support for new platforms and languages -- Bug Fixes since HDF5-1.6.0 -- Platforms Tested -- Known Problems - - -New Features -============ - - Configuration: - -------------- - - When make is invoked in parallel (using -j), sequential tests - are now executed simultaneously. This should make them execute - more quickly on some machines. - Also, when tests pass, they will create a foo.chkexe file. - This prevents the test from executing again until the test or - main library changes. - - On windows, all.zip is deprecated. users should - read INSTALL_Windows.txt to know the details. - Reasons to deprecate all.zip: - 1. Avoid confliction for windows programmers - 2. Decrease size of CVS tree by adding all.zip - 3. Avoid using winzip as the intermediate step - --KY 2005/04/22 - - When HDF5 is created as a shared library, it now uses libtool's - shared library versioning scheme. -JML 2005/04/18 - - HDF5 now uses automake 1.9.5 to generate Makefiles.in. - This has a number of effects on users: - The Fortran compiler should be set using the environment - variable $FC, not $F9X. F9X still works, but is depreciated. - The output of make may be different. This should be only a - cosmetic effect. - make depened (or make dep) is no longer recognized, since automake - handles dependency tracking. - Some new configure options exist. --enable-dependency-tracking - and --disable-dependency-tracking are used to control automake's - dependency tracking. Dependencies are on by default *on most - platforms and compilers*. If --enable-dependency-tracking is - used, they will be enabled on any platform. However, this can - slow down builds or even cause build errors in some cases. - Likewise, --disable-dependency-tracking can speed up builds and - avoid some build errors. - Some make targets have alternate names. make check-install and - make installcheck do the same thing, for instance. - pmake on IRIX can be invoked from the root directory, but the - -V flag must be used to invoke it in any subdirectory or it - will give an error about undefined variables. - JML 2005/01 - 2005/03 - - Hardware conversion between long double and integers is also added. - SLU 2005/02/10 - - Started to support software conversion between long double and - integers. Hardware conversion will come very soon. SLU - 2005/1/6 - - Intel v8.0 compiler would infinite loop when compiling some test - code with -O3 option. Changed enable-production default compiler - option to -O2. AKC - 2004/12/06 - - Long double is assumed to be a supported C data type. It is a - stanadard C89 type. AKC - 2004/10/22 - - The IA64 will use ecc as the C++ compiler by default. - - Added some initial support for making purify (or similar memory - checking products) happier by initializing buffers to zero and - disabling the internal free list code. To take advantage of this, - define 'H5_USING_PURIFY' in your CFLAGS when building the library. - QAK - 2004/07/23 - - Fixed the long compile time of H5detect.c when v7.x Intel Compiler - is used with optimization NOT off. AKC - 2004/05/20 - - Fixed configure setting of C++ for OSF1 platform. AKC - 2004/01/06 - - Prefix default is changed from /usr/local to `pwd`/hdf5. 
- AKC - 2003/07/09 - - Library: - -------- - - Added H5F_OBJ_LOCAL flag to H5Fget_obj_count() & H5Fget_obj_ids(), to - allow querying for objects in file that were opened with a particular - file ID, instead of all objects opened in file with any file ID. - QAK - 2005/06/01 - - Added H5T_CSET_UTF8 character set to mark datatypes that use the - UTF-8 Unicode character encoding. Added tests to ensure that - library handles UTF-8 object names, attributes, etc. -JL 2005/05/13 - - HDF5 supports collective MPI-IO for irregular selection with HDF5 - dataset. Irregular selection is when users use H5Sselect_hyperslab - more than once for the same dataset. - Currently, not all MPI-IO packages support complicated MPI derived - datatype used in the implementation of irregular - selection INSIDE HDF5. - 1) DEC 5.x is not supporting complicated derived datatype. - 2) For AIX 5.1 32-bit, - if your poe version number is 3.2.0.19 or lower, - please edit powerpc-ibm-aix5.x under hdf5/config, - Find the line with - << hdf5_mpi_complex_derived_datatype_works>> - and UNCOMMENT this line before the configure. - check poe version with the following command: - lpp -l all | grep ppe.poe - For AIX 5.1 64-bit, - regardless of poe version number, please UNCOMMENT - << hdf5_mpi_complex_derived_datatype_works>> under hdf5/config. - We suspect there are some problems for MPI-IO implementation - for 64-bit. - 3) For Linux cluster, - if mpich version is 1.2.5 or lower, collective irregular selection - IO is not supported, internally independent IO is used. - 4) For IRIX 6.5, - if C compiler version is 7.3 or lower, collective irregular selection - IO is not supported, internally independent IO is used. - KY - 2005/07/13 - - HDF5 N-bit filter - HDF5 support N-bit filter from this version, - The N-Bit filter is used effectively for compressing data of N-Bit - datatype as well as compound and array datatype with N-Bit fields. - KY - 2005/04/15 - - HDF5 scaleoffset filter - HDF5 supports scaleoffset filter for users to do data - compression through HDF5 library. - Scale-Offset compression performs a scale and/or offset operation - on each data value and truncates the resulting value to a minimum - number of bits and then stores the data. - Scaleoffset filter supports floating-point and integer datatype. - Please check the HDF5 reference manual for this. - KY - 2005/06/06 - - Retired SRB vfd (--with-srb). Functions H5Pset_fapl_srb and - H5Pget_fapl_srb were removed. EIP - 2005/04/07 - - Retired GASS vfd (--with-gass). Functions H5Pset_fapl_gass and - H5Pget_fapl_gass are removed too. AKC - 2005/3/3 - - Pablo was removed from the source code EIP - 2005/01/21 - - Modified registration of SZIP to dynamically detect the presence - or absence of the encoder. Changed configure and Makefiles, - and tests to dynamically detect encoder. BEM - 2004/11/02 - - Added function H5Pget_data_transform, together with the previously - added H5Pset_data_transform, to support the data transform - feature. AKC - 2004/10/26 - - Compound datatype has been enhanced with a new feature of size - adjustment. The size can be increased and decreased(without - cutting the last member) as long as it doesn't go down to zero. - No API change is involved. SLU - 2004/10/1 - - Put back 6 old error API functions to be backward compatible with - version 1.6. They are H5Epush, H5Eprint, H5Ewalk, H5Eclear, - H5Eset_auto, H5Eget_auto. 
Their new equivalent functions are - called H5Epush_stack, H5Eprint_stack, H5Ewalk_stack, - H5Eclear_stack, H5Eset_auto_stack, H5Eget_auto_stack. SLU - - 2004/9/2 - - 4 new API functions, H5Tencode, H5Tdecode, H5Sencode, H5Sdecode were - added to the library. Given object ID, these functions encode and - decode HDF5 objects(data type and space) information into and from - binary buffer. SLU - 2004/07/21 - - Modified the way how HDF5 calculates 'pixels_per_scanline' parameter for - SZIP compression. Now there is no restriction on the size and shape of the - chunk except that the total number of elements in the chunk cannot be - bigger than 'pixels_per_block' parameter provided by the user. - EIP - 2004/07/21 - - Added support for SZIP without encoder. Added H5Zget_filter_info - and changed H5Pget_filter and H5Pget_filter_by_id to support this - change. JL/NF - 2004/06/30 - - SZIP always uses K13 compression. This flag no longer needs to - be set when calling H5Pset_szip. If the flag for CHIP - compression is set, it will be ignored (since the two are mutually - exclusive). JL/NF - 2004/6/30 - - A new API function H5Fget_name was added. It returns the name - of the file by object(file, group, data set, named data type, - attribute) ID. SLU - 2004/06/29 - - Added support for user defined identifier types. NF/JL - 2004/06/29 - - A new API function H5Fget_filesize was added. It returns the - actual file size of the opened file. SLU - 2004/06/24 - - New Feature of Data transformation is added. AKC - 2004/05/03. - - New exception handler for datatype conversion is put in to - replace the old overflow callback function. This exception - handler is set through H5Pset_type_conv_cb function. - SLU - 2004/4/27 - - Added option that if $HDF5_DISABLE_VERSION_CHECK is set to 2, - will suppress all library version mismatch warning messages. - AKC - 2004/4/14 - - A new type of dataspace, null dataspace(dataspace without any - element) was added. SLU - 2004/3/24 - - Data type conversion(software) from integer to float was added. - SLU - 2004/3/13 - - Data type conversion(software) from float to integer was added. - Conversion from integer to float will be added later. - SLU -2004/2/4 - - Added new H5Premove_filter routine to remove I/O pipeline filters - from dataset creation property lists. PVN - 2004/01/26 - - Added new 'compare' callback parameter to H5Pregister & H5Pinsert - routines. QAK - 2004/01/07 - - Data type conversion(hardware) between integers and floats was added. - SLU 2003/11/21 - - New function H5Iget_file_id() was added. It returns file ID given - an object(dataset, group, or attribute) ID. SLU 2003/10/29 - - Added new fields to the H5G_stat_t for more information about an - object's object header. QAK 2003/10/06 - - Added new H5Fget_freespace() routine to query the free space in a - given file. QAK 2003/10/06 - - Added backward compatability with v1.6 for new Error API. SLU - - 2003/09/24 - - Changed 'objno' field in H5G_stat_t structure from 'unsigned long[2]' - to 'haddr_t'. QAK - 2003/08/08 - - Changed 'fileno' field in H5G_stat_t structure from 'unsigned long[2]' - to 'unsigned long'. QAK - 2003/08/08 - - Changed 'hobj_ref_t' type from structure with array field to 'haddr_t'. - QAK - 2003/08/08 - - Object references (hobj_ref_t) can now be compared with the 'objno' - field in the H5G_stat_t struct for testing if two objects are the - same within a file. QAK - 2003/08/08 - - Switched over to new error API. 
SLU - 2003/07/25 - - Parallel Library: - ----------------- - - Allow compressed, chunked datasets to be read in parallel. - QAK - 2004/10/04 - - Add options of using atomicity and file-sync to test_mpio_1wMr. - AKC - 2003/11/13 - - Added parallel test, test_mpio_1wMr, which tests if the - underlaying parallel I/O system is conforming to the POSIX - write/read requirement. AKC - 2003/11/12 - - Fortran Library: - ---------------- - - added missing h5tget_member_class_f function - EIP 2005/04/06 - - added new functions h5fget_name_f and h5fget_filesize_f - EIP 2004/07/08 - - h5dwrite/read_f and h5awrite/read_f functions only accept dims parameter - of the type INTEGER(HSIZE_T). - - added support for native integers of 8 bytes (i.e. when special - compiler flag is specified to set native fortran integers to 8 bytes, - for example, -i8 flag for PGI and Absoft Fortran compilers, - -qintsize=8 flag for IBM xlf compiler). - EIP 2005/06/20 - - - Tools: - ------ - - new tool, h5jam. See reference manual. 2004/10/08 - - h5repack.sh did not report errors encountered during tests. It does - now. AKC - 2004/04/02 - - Added the MPI-I/O and MPI-POSIX drivers to the list of VFL drivers - available for h5dump and h5ls. RPM & QAK - 2004/02/01 - - Added option --vfd= to h5ls to allow a VFL driver to be selected - by a user. RPM & QAK - 2004/02/01 - - Added option -showconfig to compiler tools (h5cc,h5fc,h5c++). - AKC - 2004/01/08 - - Install the "h5cc" and "h5fc" tools as "h5pcc" and "h5pfc" - respectively if library is built in parallel mode. - WCW - 2003/11/04 - - Added metadata benchmark (perform/perf_meta). SLU - 2003/10/03 - - Changed output of "OID"s from h5dump from "-" to - ":::" to ":" - QAK - 2003/08/08 - - High-Level APIs: - ------ - - Added Packet Table API for creating tables with less overhead than - H5TB API. Added C++ wrapper for Packet Tables. See documentation. - JML - 2004/03/28 - - -Support for new platforms, languages and compilers. -======================================= - - PGI Fortran compiler is supported on Linux64 systems (x86_64) - EIP - 2004/08/19 - - Absoft compiler f95 v9.0 supported on Linux 2.4 - EIP - 2004/07/29 - - HDF5 Fortran APIs are supported on Mac OSX with IBM XL Fortran - compiler version 8.1. This is a default compiler. - - HDF5 Fortran APIs are supported on MAC OSX with Absoft F95 compiler - version 8.2; set F9X environment varibale to f95, for example - setenv F9X f95 - Use --disable-shared --enable-static configure flags when Absoft - compiler is used. - EIP - 2004/07/27 - - HDF5 Fortran APIs are supported on MAC OSX with IBM XL Fortran - Compiler version 8.1 Use "--disable-shared --enable-static" - configure flags along with the "--enable-fortran" flag to build - Fortran library. EIP - 2004/01/07 - -Bug Fixes since HDF5-1.6.0 release -================================== - - Library - ------- - - More bug fixes on holding open files that are mounted and have - IDs open. QAK - 2005/07/14 - - Don't unmount child files until the parent file actually closes. - (Previously, if an object is holding open a file, the child files - would get unmounted too early). QAK - 2005/07/05 - - Fixed bug where unmounted files could cause the library to go into - an infinite loop when shutting down. QAK - 2005/06/30 - - The library didn't save the information of family driver in file. - The original file member size was lost after file was closed (see - bug #213). This has been fixed by saving driver name and member - file size in the superblock. 
SLU - 2005/6/24 - - Fixed bug with hyperslab selections that use selection offsets and - operate on chunked datasets going into infinite loop or dumping - core. QAK - 2005/06/17 - - Corrected memory leak and possible corruption when opening a group. - QAK - 2005/06/17 - - Added check for opaque datatype tags being too long (check against - H5T_OPAQUE_TAG_MAX, currently set to 256). QAK - 2005/06/14 - - Fixed various errors in maintaining names for open objects in the - face of unusual mount & unmount operations. QAK - 2005/06/08 - - "SEMI" and "STRONG" file close degree settings now apply only to the - particular file ID being closed, instead of operating on all open - file IDs for a given file. QAK - 2005/06/01 - - For family driver, the library didn't save member size in file. - When file is reopened, the size of 1st member file determine the - member size. Now member size is saved in file and is used to - define member file size. Wrong file access property of member size - will result in a failure. Using any other driver except family - will cause library to return error. So is multi driver. SLU - - 2005/05/24 - - Fixed error in opening object in group that was opened in mounted - file which has been unmounted. QAK - 2005/03/17 - - Fixed a racing condition in MPIPOSIX virtual file drive close - function. Now all processes must completed the close before any - of them is returned. This prevents some "faster" processes start - accessing the file for another purpose (e.g., open with truncate) - while other "slower" processes have not closed the same file with - the previous purpose. AKC - 2005/03/01 - - H5Tget_member_value calls for enum datatype didn't return correct - value if H5Tenum_valueof was called first. It's fixed. SLU - - 2005/02/08 - - For variable-length string, H5Tget_class returned H5T_STRING as its - class. But H5Tdetect_class and H5Tget_member_class considered it - as H5T_VLEN. This is fixed to let all these 3 functions treat it - as H5T_STRING. SLU - 2005/02/08 - - The byte order of 1-byte integer types was fixed as little endian - even on a big-endian machine. This has been corrected. SLU - - 2005/02/07 - - Fix segmentation fault when calling H5Fflush with an attribute that - hasn't had a value written to it open. QAK - 2004/10/18 - - Back up supporting bitfield and time types in H5Tget_native_type. - Leave it to future support. The function simply returns error - message of "not support" for bitfield and time types. - SLU - 2004/10/5 - - Fixed address check in Core VFL driver to avoid spurious address/size - overflows for odd valued addresses and/or sizes. QAK - 2004/09/27 - - Fixed parallel bug in which some processes attempted collective - I/O while others did independent I/O. Bug appeared when some - processes used point selections, and others didn't. JRM - 2004/9/15 - - Corrected error where dataset region references were written in an - incorrect way on Cray machines. PVN & QAK - 2004/09/13 - - The H5Tget_native_type now determines the native type for integers - based on the precision. This is to avoid cases of wrongly converting - an int to a short in machines that have a short of 8 bytes but with - 32bit precision (e.g Cray SV1). PVN - 2004/09/07 - - Changed H5Dread() to not overwrite data in an application's buffer - with garbage when accessing a chunked dataset with an undefined - fill value and an unwritten chunk is uncountered. 
QAK - 2004/08/25 - - Fixed error which could cause a core dump when a type conversion - routine was registered after a compound datatype had been - converted and then an equivalment compound datatype was converted - again. QAK - 2004/08/07 - - Fixed memory overwrite when encoding "multi" file driver information - for file's superblock. QAK - 2004/08/05 - - Fixed obscure bug where a filter which failed during chunk allocation - could allow library to write uncompressed data to disk but think - the data was compressed. QAK - 2004/07/29 - - Fixed bug where I/O to an extendible chunked dataset with zero-sized - dimensions would cause library to fail an assertion. - QAK - 2004/07/27 - - Fixed bug where chunked datasets which have filters defined, - allocation time set to "late" and whose chunks don't align with - the dataspace bounds could have incorrect data stored when - overwriting the entire dataset on the first write. QAK - 2004/07/27 - - Added check to ensure that dataspaces have extents set. JML-2004/07/26 - - Fixed bug on some Solaris systems where HDF5 would try to use - gettimeofday() when that function didn't work properly. - JML - 2004/07/23 - - Fixed bug in H5Sset_extent_simple where setting maximum size to - non-zero, then to zero would cause an error. JML - 2004/07/20 - - Allow NULL pointer for buffer parameter to H5Dread & H5Dwrite - when not writing data ("none" selection or hyperslab or point - selection with no elements defined). QAK - 2004/07/20 - - Calling H5Gcreate() on "/" or "." throws an error instead of - failing quietly. JML - 2004/07/19 - - Fixed bug where setting file address size to be very small could - trigger an assert if the file grew to more than 64 KB. Now throws - an error and data can be recovered. JL/NF - 2004/07/14 - - Fixed bug where "resurrecting" a dataset was failing. - QAK - 2004/07/14 - - Fixed bug where incorrect data could be read from a chunked dataset - after it was extended. QAK - 2004/07/12 - - Fixed failure to read data back from file of compound type with - variable-length string as field. SLU - 2004/06/10 - - Fixed potential file corruption bug when a block of metadata could - overlap the end of the internal metadata accumulator buffer and - the buffer would be extended correctly, but would incorrectly - change it's starting address. QAK - 2004/06/09 - - Opaque datatype with no tag failed for some operations. Fixed. - SLU - 2004/6/3 - - Fixed potential file corruption bug where dimensions that were - too large (a value greater than could be represented in 32-bits) - could cause the incorrect amount of space to be allocated in a - file for the raw data for the dataset. QAK - 2004/06/01 - - Fixed dtypes "sw long double -> double" failure in QSC class - machines. AKC - 2004/4/16 - - Fixed problem with fletcher32 filter when converting data of different - endianess. PVN - 2004/03/10 - - Fixed problem with H5Tget_native_type() not handling opaque fields - correctly. QAK - 2004/01/31 - - Fixed several errors in B-tree deletion code which could cause a - B-tree (used with groups and chunked datasets) to become corrupt - with the right sequence of deleted objects. QAK - 2004/01/19 - - Fixed small internal memory leaks of fill-value information. - QAK - 2004/01/13 - - Fixed bug that caused variable-length datatypes (strings or sequences) - used for datasets in files with objects that were unlinked to - fail to be read/written to a file. 
QAK - 2004/01/13 - - Detect situation where szip 'pixels per block' is larger than the - fastest changing dimension of a dataset's chunk size and disallow - this (due to limits in szip library). QAK - 2003/12/31 - - Fixed bug with flattened hyperslab selections that would generate - incorrect hyperslab information with certain high-dimensionality - combinations of start/stride/count/block information. - QAK - 2003/12/31 - - Fixed bug with variable-length datatypes used in compound datatypes. - SLU - 2003/12/29 - - Fixed bug in parallel I/O routines that would cause reads from - "short datasets" (datasets which were only partially written out) - to return invalid data. QAK & AKC - 2003/12/19 - - Fixed bug where scalar dataspaces for attributes were reporting as - simple dataspaces. QAK - 2003/12/13 - - Fixed problem with selection offsets of hyperslab selections in - chunked datasets causing the library to go into an infinite loop. - QAK - 2003/12/13 - - Fixed H5Giterate to avoid re-using index parameter after iteration - callback has been called (allows iteration callback to modify the - index parameter itself). QAK - 2003/12/06 - - Fixed various floating-point conversion problems, including a - change which could corrupt data when converting from double->float. - QAK - 2003/11/24 - - Changed "single process" metadata writing in library to collective - I/O by all processes, in order to guarantee correct data being - written with MPI-I/O. QAK - 2003/11/20 - - Fixed problems with fill values and variable-length types and also - I/O on VL values that were set to NULL. QAK - 2003/11/08 - - Fixed problems with MPI datatypes that caused ASCI Q machine to - hang. QAK - 2003/10/28 - - Removed HDF5_MPI_PREFER_DERIVED_TYPES environment variable support, - since it had no benefit. QAK - 2003/10/28 - - Single hyperslab selections (which were set with only one call to - H5Sselect_hyperslab) that had dimensions that could be "flattened" - but were interspersed with dimensions that could not be flattened - were not correctly handled, causing core dumps. QAK - 2003/10/25 - - Fixed incorrect datatype of the third parameter to the Fortran90 - h5pset(get)_cache_f subroutine (INTEGER to INTEGER(SIZE_T)) - EIP - 2003/10/13 - - Fixed problems with accessing variable-length data datatypes on - Crays. QAK - 2003/10/10 - - Fixed potential file corruption bug when too many object header - messages (probably attributes, from a user perspective) were - inserted into an object header and certain other conditions were - met. QAK - 2003/10/08 - - Changed implementation of internal ID searching algorithm to avoid - O(n) behavior for many common cases. QAK - 2003/10/06 - - Allow partial parallel writing to compact datasets. QAK - 2003/10/06 - - Correctly create reference to shared datatype in attribute, instead - of making a copy of the shared datatype in the attribute. - QAK - 2003/10/01 - - Revert changes which caused files >2GB to fail when created with - MPI-I/O file driver on certain platforms. QAK - 2003/09/16 - - Allow compound datatypes to grow in size. SLU - 2003/09/10 - - Detect if a type is already packed before attempting to pack it - again or check if it is locked. SLU - 2003/09/10 - - Corrected bug when opening a file twice with read-only permission - for one open and then closing the read-only access file ID would - generate an error. 
QAK - 2003/09/10 - - Corrected bug in repeated calls to H5Pget_access_plist() which would - incorrectly manage reference counts of internal information and - eventually blow up. QAK - 2003/09/02 - - Return rank of the array datatype on successful call to - H5Tget_array_dims(). QAK - 2003/08/30 - - Corrected bug in H5Tdetect_class which was not correctly detecting - datatype classes of fields in nested compound datatypes in some - circumstances. QAK - 2003/08/30 - - Corrected bug in sieve buffer code which could cause loss of data - when a small dataset was created and deleted in quick succession. - QAK - 2003/08/27 - - Corrected bug in H5Gget_objname_by_idx which was not allowing NULL - for the name when just querying for the object name's length. - QAK - 2003/08/25 - - Corrected bug in variable-length string handling which could - generate a core dump on writing variable-length strings as part - of a compound datatype on certain architectures. QAK - 2003/08/25 - - Corrected bug in H5Tget_native_type which would incorrectly compute - the size of certain compound datatypes and also incorrectly - compute the offset of the last field for those compound datatypes. - QAK - 2003/08/25 - - Corrected bug in H5Tget_native_type which would drop string datatype - metadata (padding, etc.) QAK - 2003/08/25 - - Corrected bugs in H5Gget_num_objs, H5Gget_objname_by_idx and - H5Gget_objtype_by_idx to allow them to accept location IDs, not just - group IDs. QAK - 2003/08/21 - - Corrected bug when using scalar dataspace for memory selection and - operating on chunked dataset. QAK - 2003/08/18 - - Corrected bugs with multiple '/' characters in names for H5Glink - and H5Gunlink. QAK - 2003/08/16 - - Corrected bug with user blocks that didn't allow a user block to - be inserted in front of a file after the file was created. - QAK - 2003/08/13 - - Corrected errors with using point selections to access data in - chunked datasets. QAK - 2003/07/23 - - Corrected error with variable-length datatypes and chunked datasets - caused H5Dwrite to fail sometimes. QAK - 2003/07/19 - - Modified library and file format to support storing indexed storage - (chunked dataset) B-tree's with non-default internal 'K' values. - QAK - 2003/07/15 - - Returned H5T_BKG_TEMP support to library after it was accidentally - removed. QAK - 2003/07/14 - - Configuration - ------------- - - Parallel I/O with the MPI-I/O driver will no longer work if the - filesystem is not POSIX compliant. The "HDF5_MPI_1_METAWRITE" - environment variable has been removed. QAK - 2004/01/30 - - Fixed the error that cause "make install" to fail because of the - macro definition syntax of "prefix?=..." AKC - 2003/07/22 - - Performance - ------------- - - Optimized I/O for enumerated datatypes that are a superset of source - enumerated datatype. QAK - 2005/03/19 - - More optimizations to inner loops of datatype conversions for - integers and floats which give a 10-50% speedup. QAK - 2003/11/07 - - Hoisted invariant 'if/else's out of inner datatype conversion loop for - integer and floating-point values, giving about a 20% speedup. - QAK - 2003/10/20 - - Tools - ----- - - Fixed h5dump to print attributes data in ASCII if -r option is used. - AKC - 2004/11/18 - - Fixed space utilization reported in h5ls to correct error in formula - used. QAK - 2004/10/22 - - Fixed h5redeploy which sometimes complain too many argument for the - test command. (The complain did not hinder the h5redploy to - proceed correctly.) 
AKC - 2003/11/03 - - Fixed a segmentation fault of h5diff when percentage option is used. - AKC - 2003/08/27 - - Switched away from tools using internal "fixtype" function(s) to use - H5Tget_native_type() internally. QAK - 2003/08/25 - - Documentation - ------------- - - F90 APIs - -------- - - h5pget_driver_f was returning information that could not be - interpreted by fortran application program; fixed. EIP - 2005/04/10 - - -Platforms Tested -================ - - AIX 5.1 (32 and 64-bit) xlc 6.0.0.2 - xlf 8.1.0.3 - xlC 6.0.0.4 - xlc 5.0.2.5 - xlf 7.1.1.2 - xlC 5.0.2.5 - mpcc_r 5.0.2.5 - mpxlf_r 7.1.1.2 - poe 3.2.0.10 - Cray T3E sn6606 2.0.6.08 Cray Standard C Version 6.6.0.2 - Cray Fortran Version 3.6.0.0.2 - mpt 2.2.0.0 - Cray SV1 sn9617 10.0.1.2 Cray Standard C Version 6.6.0.2 - mpt 2.2.0.0 - Cray Fortran Version 3.6.0.0.2 - Cray T90IEEE 10.0.1.01y Cray Standard C Version 6.4.0.2.3 - Cray Fortran Version 3.4.0.3 - mpt 2.1.0.0 - FreeBSD 4.9 gcc 2.95.4 - g++ 2.95.4 - HP-UX B.11.00 HP C HP92453-01 A.11.01.20 - HP F90 v2.4 - HP ANSI C++ B3910B A.03.13 - MPIch 1.2.4 - IRIX 6.5 MIPSpro cc 7.30 - IRIX64 6.5 (64 & n32) MIPSpro cc 7.3.1.3m - F90 MIPSpro 7.3.1.3m (64 only) - MPIch 1.2.4 - Linux 2.4.18 gcc 2.96, 3.2.2, 3.2.3 - g++ 3.2.2, 3.2.3 - Intel(R) C++ Version 7.1 - Intel(R) Fortran Compiler Version 7.1 - PGI compilers (pgcc, pgf90, pgCC) version 5.0-2 - MPIch 1.2.4 - Absoft Fortran v9.0 - OSF1 V5.1 Compaq C V6.4-014 - Compaq C V6.3-027 - Compaq Fortran V5.5-1877 - Compaq C++ V6.5-014 - MPI_64bit_R5 - g++ version 3.0 for C++ - SunOS 5.7 WorkShop Compilers 5.0 98/12/15 C 5.0 - (Solaris 2.7) WorkShop Compilers 5.0 98/12/15 C++ 5.0 - WorkShop Compilers 5.0 98/10/25 - FORTRAN 90 2.0 Patch 107356-04 - SunOS 5.8 32,46 Sun WorkShop 6 update 2 C 5.3 - (Solaris 2.8) Sun WorkShop 6 update 2 Fortran 90 - Sun WorkShop 6 update 2 C++ 5.3 - SunOS 5.9 32,64 Sun C 5.6 2004/07/15 - (Solaris 2.9) Sun Fortran 95 8.0 2004/07/15 - Sun C++ 5.6 2004/07/15 - TFLOPS r1.0.4 v4.3.3 i386 pgcc Rel 3.1-4i with mpich-1.2.4 with - local modifications - IA-32 Linux 2.4.9 gcc 2.96 - Intel(R) C++ Version 7.0 - Intel(R) Fortran Compiler Version 7.0 - - IA-64 Linux 2.4.16 ia64 gcc version 2.96 20000731 - Intel(R) C++ Version 7.0 - Intel(R) Fortran Compiler Version 7.0 - Windows XP MSVC++ 6.0 - MSVC++ .NET 2003, - Intel C++ Version 8.1 - DEC Visual Fortran 6.6c - MAC OS X Darwin 6.5 - gcc and g++ Apple Computer, Inc. 
GCC - version 1161, based on gcc version 3.1 - IBM XL Fortran compiler version 8.1 - Absoft Fortran v8.2 - - - -Supported Configuration Features Summary -======================================== - - In the tables below - y = tested and supported - n = not supported or not tested in this release - x = not working in this release - dna = does not apply - ( ) = footnote appears below second table - -Platform C C F90 F90 C++ Shared zlib - parallel parallel libraries (4) -Solaris2.7 64-bit y y (1) y y (1) y y y -Solaris2.7 32-bit y y (1) y y (1) y y y -Solaris2.8 64-bit y y (1) y y (1) y y y -Solaris2.8 32-bit y y y y (1) y y y -IRIX6.5 y y (1) n n n y y -IRIX64_6.5 64-bit y y (2) y y y y y -IRIX64_6.5 32-bit y y (2) n n n y y -HPUX11.00 y y (1) y y y y y -OSF1 v5.1 y y y y y y y -T3E y y (5) y y (5) n n y -SV1 y y (5) y y (5) n n y -T90 IEEE y y (5) y y (5) n n y -TFLOPS n y (1) n n n n y -AIX-5.1 32-bit y y y y y n y -AIX-5.1 64-bit y y y y y n y -WinXP (6) y n n n y y y -WinXP Intel y n n n y y y -WinNT CW y n n n n n y -Mac OS X 10.2 y n n n y y y -FreeBSD y y (1) n n y y y -Linux 2.4 gcc (3) y y (1) y (PGI) n y y y -Linux 2.4 Intel (3) y n y n y n y -Linux 2.4 PGI (3) y n y n y n y -Linux 2.4 IA32 Intel y n y n y n y -Linux 2.4 IA64 Intel y n y n y n y - - -ASCII Table 2 -- for RELEASE.txt - -Platform static- Thread- SZIP GASS STREAM- High-level H4/H5 - exec safe VFD APIs tools (7) -Solaris2.7 64-bit x y y n y y n -Solaris2.7 32-bit x y y n y y y -Solaris2.8 64-bit x y y n y y n -Solaris2.8 32-bit x y y n y y y -IRIX6.5 x n y n y y y -IRIX64_6.5 64-bit x y y y y y y -IRIX64_6.5 32-bit x y y y y y y -HPUX11.00 x n y n y y y -OSF1 v5.1 y n y n y y y -T3E y n n n y y y -SV1 y n n n y y y -T90 IEEE y n n n y y n -TFLOPS y n n n n n n -AIX-5.1 32-bit y n y n y y y -AIX-5.1 64-bit y n y n y y y -WinXP (6) y n y n n y y -WinXP Intel y n y n n y y -WinNT CW y n y n n y y -Mac OS X 10.2 y n y n y y n -FreeBSD y y y n y y y -Linux 2.4 gcc (3) y y y n y y y -Linux 2.4 Intel (3) y n y n y n n -Linux 2.4 PGI (3) y n y n y n n -Linux 2.4 IA32 Intel y n y n y y y -Linux 2.4 IA64 Intel y n y n y y y - - Notes: (1) Using mpich 1.2.4. - (2) Using mpt and mpich 1.2.4. - (3) Linux 2.4 with GNU, Intel, and PGI compilers, respectively. - (4) Shared libraries are provided only for the C library, except - on Windows where they are provided for C and C++. - (5) Using mpt. - (6) Binaries only; source code for this platform is not being - released at this time. - (7) Includes the H4toH5 Library and the h4toh5 and h5toh4 - utilities. - Compiler versions for each platform are listed in the preceding - "Platforms Tested" table. - - - -Known Problems -============== -* The dataset created or rewritten with the v1.6.3 library or after can't - be read with the v1.6.2 library or before when Fletcher32 EDC(filter) is - enabled. There was a bug in the calculating code of the Fletcher32 - checksum in the library before v1.6.3. The checksum value wasn't consistent - between big-endian and little-endian systems. This bug was fixed in - Release 1.6.3. However, after fixing the bug, the checksum value is no - longer the same as before on little-endian system. The library release - after 1.6.4 can still read the dataset created or rewritten with the library - of v1.6.2 or before. SLU - 2005/6/30 -* For the version 6(6.02 and 6.04) of Portland Group compiler on AMD Opteron - processor, there's a bug in the compiler for optimization(-O2). The library - failed in several tests but all related to multi driver. 
The problem has - been reported to the vendor. -* On windows XP, Fortran DLLs are not working with DEC Fortran 6.6c and Intel - 8.1. -* On IBM AIX systems, parallel HDF5 mode will fail some tests with error - messages like "INFO: 0031-XXX ...". This is from the command poe. - Set the environment variable MP_INFOLEVEL to 0 to minimize the messages - and run the tests again. - The tests may fail with messages like "The socket name is already - in use". HDF5 does not use sockets (except for stream-VFD). This is - due to problems of the poe command trying to set up the debug socket. - Check if there are many old /tmp/s.pedb.* staying around. These are - sockets used by the poe command and left behind due to failed commands. - Ask your system administrator to clean them out. Lastly, request IBM - to provide a mean to run poe without the debug socket. - -* The C++ library's tests fails when compiling with PGI C++ compiler. The - workaround until the problem is correctly handled is to use the - flag "--instantiate=local" prior to the configure and build steps, as: - setenv CXX "pgCC --instantiate=local" for pgCC 5.02 and higher - setenv CXX "pgCC -tlocal" for others - -* The h5dump tests may fail to match the expected output on some platforms - (e.g. parallel jobs, Windows) where the error messages directed to - "stderr" do not appear in the "right order" with output from stdout. - This is not an error. - -* The stream-vfd test uses ip port 10007 for testing. If another - application is already using that port address, the test will hang - indefinitely and has to be terminated by the kill command. To try the - test again, change the port address in test/stream_test.c to one not - being used in the host. - -* The --enable-static-exec configure flag fails to compile for Solaris - platforms. This is due to the fact that not all of the system - libraries on Solaris are available in a static format. - - The --enable-static-exec configure flag also fails to correctly compile - on IBM SP2 platform for the serial mode. The parallel mode works fine - with this option. - - It is suggested that you don't use this option on these platforms - during configuration. - -* With the gcc 2.95.2 compiler, HDF 5 uses the `-ansi' flag during - compilation. The ANSI version of the compiler complains about not being - able to handle the `long long' datatype with the warning: - - warning: ANSI C does not support `long long' - - This warning is innocuous and can be safely ignored. - - -* The Stream VFD was not tested yet under Windows. It is not supported - in the TFLOPS machine. - - -* The ./dsets tests failed in the TFLOPS machine if the test program, - dsets.c, is compiled with the -O option. The hdf5 library still works - correctly with the -O option. The test program works fine if it is - compiled with -O1 or -O0. Only -O (same as -O2) causes the test - program to fail. - -* Certain platforms give false negatives when testing h5ls: - - Cray J90 and Cray T90IEEE give errors during testing when displaying - some floating-point values. These are benign differences due to - the different precision in the values displayed and h5ls appears to - be dumping floating-point numbers correctly. 
- -* Before building HDF5 F90 Library from source on Crays - replace H5Aff.f90, H5Dff.f90 and H5Pff.f90 files in the fortran/src - subdirectory in the top level directory with the Cray-specific files - from the site: -ftp://hdf.ncsa.uiuc.edu/pub/outgoing/hdf5/hdf5-1.6.2/F90_source_for_Crays - -* On some platforms that use Intel and Absoft compilers to build HDF5 fortran library, - compilation may fail for fortranlib_test.f90, fflush1.f90 and fflush2.f90 - complaining about exit subroutine. Comment out the line - IF (total_error .ne. 0) CALL exit (total_error) - -* On IA32 and IA64 systems, if you use a compiler other than GCC (such as - Intel's ecc or icc compilers), you will need to modify the generated - "libtool" program after configuration is finished. On or around line 104 of - the libtool file, there are lines which look like: - - # How to pass a linker flag through the compiler. - wl="" - - change these lines to this: - - # How to pass a linker flag through the compiler. - wl="-Wl," - - UPDATE: This is now done automatically by the configure script. However, if - you still experience a problem, you may want to check this line in the - libtool file and make sure that it has the correct value. - -* Information about building with PGI and Intel compilers is available in - INSTALL file sections 5.7 and 5.8 - -* On at least one system, (SDSC DataStar), the scheduler (in this case - LoadLeveler) sends job status updates to standard error when you run - any executable that was compiled with the parallel compilers. - - This causes problems when running "make check" on parallel builds, as - many of the tool tests function by saving the output from test runs, - and comparing it to an exemplar. - - The best solution is to reconfigure the target system so it no longer - inserts the extra text. However, this may not be practical. - - In such cases, one solution is to "setenv HDF5_Make_Ignore yes" prior to - the configure and build. This will cause "make check" to continue after - detecting errors in the tool tests. However, in the case of SDSC DataStar, - it also leaves you with some 150 "failed" tests to examine by hand. - - A second solution is to write a script to run serial tests and filter - out the text added by the scheduler. A sample script used on SDSC - DataStar is given below, but you will probably have to customize it - for your installation. - - Observe that the basic idea is to insert the script as the first item - on the command line which executes the the test. The script then - executes the test and filters out the offending text before passing - it on. - - #!/bin/csh - - set STDOUT_FILE=~/bin/serial_filter.stdout - set STDERR_FILE=~/bin/serial_filter.stderr - - rm -f $STDOUT_FILE $STDERR_FILE - - ($* > $STDOUT_FILE) >& $STDERR_FILE - - set RETURN_VALUE=$status - - cat $STDOUT_FILE - - tail +3 $STDERR_FILE - - exit $RETURN_VALUE - - You get the HDF make files and test scipts to execute your filter script - by setting the environment variable "RUNSERIAL" to the full path of the - script prior to running configure for parallel builds. Remember to - "unsetenv RUNSERIAL" before running configure for a serial build. - - Note that the RUNSERIAL environment variable exists so that we can - can prefix serial runs as necessary on the target system. On DataStar, - no prefix is necessary. However on an MPICH system, the prefix might - have to be set to something like "/usr/local/mpi/bin/mpirun -np 1" to - get the serial tests to run at all. 
- - In such cases, you will have to include the regular prefix in your - filter script. diff --git a/doc/html/Attributes.html b/doc/html/Attributes.html deleted file mode 100644 index 63f8e5e..0000000 --- a/doc/html/Attributes.html +++ /dev/null @@ -1,287 +0,0 @@ - - - - Attribute Interface (H5A) - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

The Attribute Interface (H5A)

- -

1. Introduction

- -

The attribute API (H5A) is primarily designed to allow small - datasets to be attached to primary datasets as metadata. - Additional goals for the H5A interface include keeping storage - requirements for each attribute to a minimum and making it easy to share - attributes among datasets. -

Because attributes are intended to be small objects, large datasets - intended as additional information for a primary dataset should be - stored as supplemental datasets in a group with the primary dataset. - Attributes can then be attached to the group containing everything to - indicate that a particular type of dataset, with supplemental datasets, is - located in the group. How small is "small" is not defined by the - library and is up to the user's interpretation. -

Attributes are not separate objects in the file; they are always - contained in the object header of the object they are attached to. The - I/O functions defined below, not the H5D I/O routines, are required - to read or write attribute information. - -

2. Creating, Opening, Closing and Deleting Attributes

- -

Attributes are created with the H5Acreate() function, - and existing attributes can be accessed with either the - H5Aopen_name() or H5Aopen_idx() functions. All - three functions return an object ID, which should eventually be released - by calling H5Aclose(). - -

-
hid_t H5Acreate (hid_t loc_id, const char - *name, hid_t type_id, hid_t space_id, - hid_t create_plist_id) -
This function creates an attribute which is attached to the object - specified with loc_id. The name specified with name - for each attribute for an object must be unique for that object. The type_id - and space_id are created with the H5T and H5S interfaces - respectively. Currently only simple dataspaces are allowed for attribute - dataspaces. The create_plist_id property list is currently - unused, but will be used in the future for optional properties of - attributes. The attribute ID returned from this function must be released - with H5Aclose or resource leaks will develop. Attempting to create an - attribute with the same name as an already existing attribute will fail, - leaving the pre-existing attribute in place. - This function returns an attribute ID for success or negative for failure. - -

-
hid_t H5Aopen_name (hid_t loc_id, const char - *name) -
This function opens an attribute which is attached to the object - specified with loc_id. The name specified with name - indicates the attribute to access. The attribute ID returned from this - function must be released with H5Aclose or resource leaks will develop. - This function returns an attribute ID for success or negative for failure. - -

-
hid_t H5Aopen_idx (hid_t loc_id, unsigned - idx) -
This function opens an attribute which is attached to the object - specified with loc_id. The attribute specified with idx - indicates the idxth attribute to access, starting with '0'. The - attribute ID returned from this function must be released with H5Aclose or - resource leaks will develop. - This function returns an attribute ID for success or negative for failure. - -

-
herr_t H5Aclose (hid_t attr_id) -
This function releases an attribute from use. Further use of the - attribute ID will result in undefined behavior. - This function returns non-negative on success, negative on failure. - -

-
herr_t H5Adelete (hid_t loc_id, - const char *name) -
This function removes the named attribute from a dataset or group. - This function should not be used when attribute IDs are open on loc_id - as it may cause the internal indexes of the attributes to change and future - writes to the open attributes to produce incorrect results. - Returns non-negative on success, negative on failure. -
- -
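For illustration, the calls above fit together as follows. This is a minimal sketch, not part of the original page: it assumes a previously opened, writable dataset identifier dataset, uses the five-argument H5Acreate signature documented above, and omits error checking.

    /* Requires <hdf5.h>. Attach a one-element integer attribute named "units". */
    hsize_t dims[1] = {1};
    hid_t   space   = H5Screate_simple(1, dims, NULL);   /* simple dataspace, as required    */
    hid_t   attr    = H5Acreate(dataset, "units", H5T_NATIVE_INT,
                                space, H5P_DEFAULT);     /* create_plist is currently unused */

    /* ... write a value with H5Awrite (see section 3) ... */

    H5Aclose(attr);                         /* release the ID to avoid resource leaks    */
    H5Sclose(space);

    attr = H5Aopen_name(dataset, "units");  /* reopen the same attribute later by name   */
    H5Aclose(attr);
    H5Adelete(dataset, "units");            /* remove it while no attribute IDs are open */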

3. Attribute I/O Functions

- -

Attributes may only be written as an entire object; no partial I/O - is currently supported. - -

-
herr_t H5Awrite (hid_t attr_id, - hid_t mem_type_id, void *buf) -
This function writes an attribute, specified with attr_id, - with mem_type_id specifying the datatype in memory. The entire - attribute is written from buf to the file. - This function returns non-negative on success, negative on failure. - -

-
herr_t H5Aread (hid_t attr_id, - hid_t mem_type_id, void *buf) -
This function reads an attribute, specified with attr_id, with - mem_type_id specifying the datatype in memory. The entire - attribute is read into buf from the file. - This function returns non-negative on success, negative on failure. - -
- -
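A short sketch of whole-attribute I/O with these two calls (assumed names, no error checking; not part of the original page):

    /* Requires <hdf5.h>; dataset is a previously opened object ID. */
    int   value = 42;
    hid_t attr  = H5Aopen_name(dataset, "units");

    H5Awrite(attr, H5T_NATIVE_INT, &value);      /* the entire attribute is written   */

    int readback = 0;
    H5Aread(attr, H5T_NATIVE_INT, &readback);    /* the entire attribute is read back */

    H5Aclose(attr);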

4. Attribute Inquiry Functions

- -
-
herr_t H5Aiterate (hid_t loc_id, - unsigned *attr_number, - H5A_operator operator, - void *operator_data) -
This function iterates over the attributes of the dataset or group - specified with loc_id. For each attribute of the object, the - operator_data and some additional information (specified below) - are passed to the operator function. The iteration begins with - the attribute indexed by *attr_number, and the index of the next attribute to be - processed by the operator is returned in *attr_number. -

The iterator returns a negative value if something is wrong, the return - value of the last operator if it was non-zero, or zero if all attributes - were processed. -

The prototype for H5A_operator_t is:
- typedef herr_t (*H5A_operator_t)(hid_t loc_id, - const char *attr_name, void *operator_data); -

The operator receives the ID of the group or dataset being iterated over - (loc_id), the name of the current attribute of the object (attr_name), - and the pointer to the operator data passed in to H5Aiterate - (operator_data). The return values from an operator are: -

    -
  • Zero causes the iterator to continue, returning zero when all - attributes have been processed. -
  • Positive causes the iterator to immediately return that positive - value, indicating short-circuit success. The iterator can be - restarted at the next attribute. -
  • Negative causes the iterator to immediately return that value, - indicating failure. The iterator can be restarted at the next - attribute. -
-

-
hid_t H5Aget_space (hid_t attr_id) -
This function retrieves a copy of the dataspace for an attribute. - The dataspace ID returned from this function must be released with H5Sclose - or resource leaks will develop. - This function returns a dataspace ID for success or negative for failure. -

-
hid_t H5Aget_type (hid_t attr_id) -
This function retrieves a copy of the datatype for an attribute. - The datatype ID returned from this function must be released with H5Tclose - or resource leaks will develop. - This function returns a datatype ID for success or negative for failure. -

-
ssize_t H5Aget_name (hid_t attr_id, - size_t buf_size, char *buf) -
This function retrieves the name of an attribute for an attribute ID. - Up to buf_size characters are stored in buf followed by a - '\0' string terminator. If the name of the attribute is longer than - buf_size-1, the string terminator is stored in the last position - of the buffer to properly terminate the string. - This function returns the length of the attribute's name (which may be - longer than buf_size) on success or negative for failure. -

-
int H5Aget_num_attrs (hid_t loc_id) -
This function returns the number of attributes attached to a dataset or - group, loc_id. - This function returns non-negative for success or negative for failure. -
- - -
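As a hedged sketch of how these inquiry calls combine (not part of the original page), the operator below prints each attribute's name and datatype size and is driven by H5Aiterate; the callback follows the H5A_operator_t prototype shown above, and dataset and print_attr are assumed names. It requires <hdf5.h> and <stdio.h>, and error checking is omitted.

    herr_t print_attr(hid_t loc_id, const char *attr_name, void *operator_data)
    {
        hid_t attr = H5Aopen_name(loc_id, attr_name);
        hid_t type = H5Aget_type(attr);            /* copy of the attribute's datatype */

        printf("attribute %s, datatype size %lu\n",
               attr_name, (unsigned long)H5Tget_size(type));

        H5Tclose(type);                            /* release the copies obtained above */
        H5Aclose(attr);
        return 0;                                  /* zero: continue the iteration      */
    }

    /* ... */
    unsigned idx = 0;                              /* start with the first attribute    */
    H5Aiterate(dataset, &idx, print_attr, NULL);
    printf("object has %d attributes\n", H5Aget_num_attrs(dataset));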
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- - -Last modified: 6 July 2000 - - - - diff --git a/doc/html/Big.html b/doc/html/Big.html deleted file mode 100644 index fe00ff8..0000000 --- a/doc/html/Big.html +++ /dev/null @@ -1,122 +0,0 @@ - - - - Big Datasets on Small Machines - - - -

Big Datasets on Small Machines

- -

1. Introduction

- -

The HDF5 library is able to handle files larger than the - maximum file size, and datasets larger than the maximum memory - size. For instance, a machine where sizeof(off_t) - and sizeof(size_t) are both four bytes can handle - datasets and files as large as 18x10^18 bytes. However, most - Unix systems limit the number of concurrently open files, so a - practical file size limit is closer to 512GB or 1TB. - -

Two "tricks" must be employed on these small systems in order - to store large datasets. The first trick circumvents the - off_t file size limit and the second circumvents - the size_t main memory limit. - -

2. File Size Limits

- -

Systems that have 64-bit file addresses will be able to access - large files automatically. One should see the following output - from configure: - 

-checking size of off_t... 8
-    
- -

Also, some 32-bit operating systems have special file systems - that can support large (>2GB) files and HDF5 will detect - these and use them automatically. If this is the case, the - output from configure will show: - -

-checking for lseek64... yes
-checking for fseek64... yes
-    
- -

Otherwise one must use an HDF5 file family. Such a family is - created by setting file family properties in a file access - property list and then supplying a file name that includes a - printf-style integer format. For instance: - -

-hid_t plist, file;
-plist = H5Pcreate (H5P_FILE_ACCESS);
-H5Pset_family (plist, 1<<30, H5P_DEFAULT);
-file = H5Fcreate ("big%03d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, plist);
-    
- -

The second argument (1<<30) to - H5Pset_family() indicates that the family members - are to be 2^30 bytes (1GB) each although we could have used any - reasonably large value. In general, family members cannot be - 2GB because writes to byte number 2,147,483,647 will fail, so - the largest safe value for a family member is 2,147,483,647. - HDF5 will create family members on demand as the HDF5 address - space increases, but since most Unix systems limit the number of - concurrently open files the effective maximum size of the HDF5 - address space will be limited (the system on which this was - developed allows 1024 open files, so if each family member is - approx 2GB then the largest HDF5 file is approx 2TB). - -

If the effective HDF5 address space is limited then one may be - able to store datasets as external datasets each spanning - multiple files of any length since HDF5 opens external dataset - files one at a time. To arrange storage for a 5TB dataset split - among 1GB files one could say: - -

-hid_t plist = H5Pcreate (H5P_DATASET_CREATE);
-for (i=0; i<5*1024; i++) {
-   sprintf (name, "velocity-%04d.raw", i);
-   H5Pset_external (plist, name, 0, (size_t)1<<30);
-}
-    
- -

3. Dataset Size Limits

- -

The second limit which must be overcome is that of - sizeof(size_t). HDF5 defines a data type called - hsize_t which is used for sizes of datasets and is, - by default, defined as unsigned long long. - -

To create a dataset with 8*2^30 4-byte integers for a total of - 32GB one first creates the dataspace. We give two examples - here: a 4-dimensional dataset whose dimension sizes are smaller - than the maximum value of a size_t, and a - 1-dimensional dataset whose dimension size is too large to fit - in a size_t. - -

-hsize_t size1[4] = {8, 1024, 1024, 1024};
-hid_t space1 = H5Screate_simple (4, size1, size1);
-
-hsize_t size2[1] = {8589934592LL};
-hid_t space2 = H5Screate_simple (1, size2, size2);
-    
- -

However, the LL suffix is not portable, so it may - be better to replace the number with - (hsize_t)8*1024*1024*1024. - -
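As a small illustration (not from the original text), both declarations below describe the same count of elements, but the second spelling avoids the non-portable LL suffix by letting the cast force the arithmetic into hsize_t:

    hsize_t n1 = 8589934592LL;                  /* requires long long literal support */
    hsize_t n2 = (hsize_t)8*1024*1024*1024;     /* portable spelling of 8*2^30        */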

For compilers that don't support long long large - datasets will not be possible. The library performs too much - arithmetic on hsize_t types to make the use of a - struct feasible. - -


-
Robb Matzke
- - -Last modified: Sun Jul 19 11:37:25 EDT 1998 - - - diff --git a/doc/html/Caching.html b/doc/html/Caching.html deleted file mode 100644 index d194ba3..0000000 --- a/doc/html/Caching.html +++ /dev/null @@ -1,190 +0,0 @@ - - - - Data Caching - - - - - - - - - - -
-
- - - -
-
-
-

Data Caching

- -

1. Meta Data Caching

- -

The HDF5 library caches two types of data: meta data and raw - data. The meta data cache holds file objects like the file - header, symbol table nodes, global heap collections, object - headers and their messages, etc. in a partially decoded - state. The cache has a fixed number of entries which is set with - the file access property list (defaults to 10k) and each entry - can hold a single meta data object. Collisions between objects - are handled by preempting the older object in favor of the new - one. - -

2. Raw Data Chunk Caching

- -

Raw data chunks are cached because I/O requests at the - application level typically don't map well to chunks at the - storage level. The chunk cache has a maximum size in bytes - set with the file access property list (defaults to 1MB) and - when the limit is reached chunks are preempted based on the - following set of heuristics. - -

    -
  • Chunks which have not been accessed for a long time - relative to other chunks are penalized. -
  • Chunks which have been accessed frequently in the recent - past are favored. -
  • Chunks which are completely read and not written, completely - written but not read, or completely read and completely - written are penalized according to w0, an - application-defined weight between 0 and 1 inclusive. A weight - of zero does not penalize such chunks while a weight of 1 - penalizes those chunks more than all other chunks. The - default is 0.75. -
  • Chunks which are larger than the maximum cache size do not - participate in the cache. -
- -

One should choose large values for w0 if I/O requests - typically do not overlap but smaller values for w0 if - the requests do overlap. For instance, reading an entire 2d - array by reading from non-overlapping "windows" in a row-major - order would benefit from a high w0 value while reading - a diagonal across the dataset where each request overlaps the - previous request would benefit from a small w0. - 

3. Data Caching Operations

- -

The cache parameters for both caches are part of a file access - property list and are set and queried with this pair of - functions: - -

-
herr_t H5Pset_cache(hid_t plist, unsigned int - mdc_nelmts, size_t rdcc_nbytes, double - w0) -
herr_t H5Pget_cache(hid_t plist, unsigned int - *mdc_nelmts, size_t *rdcc_nbytes, double - *w0) -
Sets or queries the meta data cache and raw data chunk cache - parameters. The plist is a file access property - list. The number of elements (objects) in the meta data cache - is mdc_nelmts. The total size of the raw data chunk - cache and the preemption policy are given by rdcc_nbytes and - w0, respectively. For H5Pget_cache() any (or all) of - the pointer arguments may be null pointers. A brief usage sketch - follows below. -
- - -
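The sketch below follows the four-argument form documented just above; note that some library releases take an additional rdcc_nelmts argument, so the reference manual should be consulted for the exact prototype. The file name and the chosen numbers are illustrative only.

    hid_t fapl = H5Pcreate (H5P_FILE_ACCESS);
    /* 2000 meta data entries, a 4MB chunk cache, and the default w0 of 0.75 */
    H5Pset_cache (fapl, 2000, 4*1024*1024, 0.75);
    hid_t file = H5Fopen ("example.h5", H5F_ACC_RDWR, fapl);
    H5Pclose (fapl);    /* the property list may be released once the file is open */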
-
- - - -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 13 December 1999 - - - - - diff --git a/doc/html/Chunk_f1.gif b/doc/html/Chunk_f1.gif deleted file mode 100644 index d73201a..0000000 Binary files a/doc/html/Chunk_f1.gif and /dev/null differ diff --git a/doc/html/Chunk_f1.obj b/doc/html/Chunk_f1.obj deleted file mode 100644 index 004204a..0000000 --- a/doc/html/Chunk_f1.obj +++ /dev/null @@ -1,252 +0,0 @@ -%TGIF 3.0-p17 -state(0,33,100.000,0,0,0,16,1,9,1,1,0,0,0,1,1,1,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -color_info(11,65535,0,[ - "magenta", 65535, 0, 65535, 65280, 0, 65280, 1, - "red", 65535, 0, 0, 65280, 0, 0, 1, - "green", 0, 65535, 0, 0, 65280, 0, 1, - "blue", 0, 0, 65535, 0, 0, 65280, 1, - "yellow", 65535, 65535, 0, 65280, 65280, 0, 1, - "pink", 65535, 49344, 52171, 65280, 49152, 51968, 1, - "cyan", 0, 65535, 65535, 0, 65280, 65280, 1, - "CadetBlue", 24415, 40606, 41120, 24320, 40448, 40960, 1, - "white", 65535, 65535, 65535, 65280, 65280, 65280, 1, - "black", 0, 0, 0, 0, 0, 0, 1, - "DarkSlateGray", 12079, 20303, 20303, 12032, 20224, 20224, 1 -]). -page(1,"",1). -text('black',432,272,'Courier',0,17,2,1,0,1,49,28,302,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Point", - "Written"]). -box('black',256,288,320,352,0,3,1,70,0,0,0,0,0,'3',[ -]). -text('black',288,272,'Courier',0,17,1,1,0,1,49,14,75,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset"]). -box('black',352,288,384,320,5,1,1,77,5,0,0,0,0,'1',[ -]). -text('black',368,272,'Courier',0,17,1,1,0,1,35,14,80,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Chunk"]). -box('black',96,32,544,384,0,1,1,118,0,0,0,0,0,'1',[ -]). -box('black',128,64,256,128,5,1,1,131,5,0,0,0,0,'1',[ -]). -box('black',128,128,256,192,5,1,1,132,5,0,0,0,0,'1',[ -]). -box('black',384,64,512,128,5,1,1,137,5,0,0,0,0,'1',[ -]). -box('black',256,128,384,192,5,1,1,142,5,0,0,0,0,'1',[ -]). -box('black',256,192,384,256,5,1,1,144,5,0,0,0,0,'1',[ -]). -box('black',384,192,512,256,5,1,1,146,5,0,0,0,0,'1',[ -]). -box('black',128,64,432,224,0,3,1,26,0,0,0,0,0,'3',[ -]). -group([ -polygon('black',11,[ - 152,80,154,86,160,86,155,89,157,94,152,91,147,94,149,89, - 144,86,150,86,152,80],1,1,1,0,178,0,0,0,0,0,'1', - "000",[ -]), -box('black',148,84,156,92,0,1,0,179,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',152,83,'Courier',0,17,1,1,0,1,112,14,180,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',152,80,'Courier',0,17,1,1,0,1,0,14,181,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -182,0,0,[ -]). -group([ -polygon('black',11,[ - 200,96,202,102,208,102,203,105,205,110,200,107,195,110,197,105, - 192,102,198,102,200,96],1,1,1,0,188,0,0,0,0,0,'1', - "000",[ -]), -box('black',196,100,204,108,0,1,0,189,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',200,99,'Courier',0,17,1,1,0,1,112,14,190,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',200,96,'Courier',0,17,1,1,0,1,0,14,191,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -192,0,0,[ -]). 
-group([ -polygon('black',11,[ - 168,128,170,134,176,134,171,137,173,142,168,139,163,142,165,137, - 160,134,166,134,168,128],1,1,1,0,198,0,0,0,0,0,'1', - "000",[ -]), -box('black',164,132,172,140,0,1,0,199,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',168,131,'Courier',0,17,1,1,0,1,112,14,200,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',168,128,'Courier',0,17,1,1,0,1,0,14,201,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -202,0,0,[ -]). -group([ -polygon('black',11,[ - 168,160,170,166,176,166,171,169,173,174,168,171,163,174,165,169, - 160,166,166,166,168,160],1,1,1,0,208,0,0,0,0,0,'1', - "000",[ -]), -box('black',164,164,172,172,0,1,0,209,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',168,163,'Courier',0,17,1,1,0,1,112,14,210,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',168,160,'Courier',0,17,1,1,0,1,0,14,211,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -212,0,0,[ -]). -group([ -polygon('black',11,[ - 136,144,138,150,144,150,139,153,141,158,136,155,131,158,133,153, - 128,150,134,150,136,144],1,1,1,0,218,0,0,0,0,0,'1', - "000",[ -]), -box('black',132,148,140,156,0,1,0,219,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',136,147,'Courier',0,17,1,1,0,1,112,14,220,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',136,144,'Courier',0,17,1,1,0,1,0,14,221,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -222,0,0,[ -]). -group([ -polygon('black',11,[ - 248,144,250,150,256,150,251,153,253,158,248,155,243,158,245,153, - 240,150,246,150,248,144],1,1,1,0,228,0,0,0,0,0,'1', - "000",[ -]), -box('black',244,148,252,156,0,1,0,229,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',248,147,'Courier',0,17,1,1,0,1,112,14,230,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',248,144,'Courier',0,17,1,1,0,1,0,14,231,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -232,0,0,[ -]). -group([ -polygon('black',11,[ - 296,176,298,182,304,182,299,185,301,190,296,187,291,190,293,185, - 288,182,294,182,296,176],1,1,1,0,238,0,0,0,0,0,'1', - "000",[ -]), -box('black',292,180,300,188,0,1,0,239,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',296,179,'Courier',0,17,1,1,0,1,112,14,240,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',296,176,'Courier',0,17,1,1,0,1,0,14,241,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -242,0,0,[ -]). -group([ -polygon('black',11,[ - 360,208,362,214,368,214,363,217,365,222,360,219,355,222,357,217, - 352,214,358,214,360,208],1,1,1,0,248,0,0,0,0,0,'1', - "000",[ -]), -box('black',356,212,364,220,0,1,0,249,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',360,211,'Courier',0,17,1,1,0,1,112,14,250,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',360,208,'Courier',0,17,1,1,0,1,0,14,251,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -252,0,0,[ -]). 
-group([ -polygon('black',11,[ - 408,192,410,198,416,198,411,201,413,206,408,203,403,206,405,201, - 400,198,406,198,408,192],1,1,1,0,258,0,0,0,0,0,'1', - "000",[ -]), -box('black',404,196,412,204,0,1,0,259,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',408,195,'Courier',0,17,1,1,0,1,112,14,260,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',408,192,'Courier',0,17,1,1,0,1,0,14,261,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -262,0,0,[ -]). -group([ -polygon('black',11,[ - 376,128,378,134,384,134,379,137,381,142,376,139,371,142,373,137, - 368,134,374,134,376,128],1,1,1,0,268,0,0,0,0,0,'1', - "000",[ -]), -box('black',372,132,380,140,0,1,0,269,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',376,131,'Courier',0,17,1,1,0,1,112,14,270,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',376,128,'Courier',0,17,1,1,0,1,0,14,271,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -272,0,0,[ -]). -group([ -polygon('black',11,[ - 408,80,410,86,416,86,411,89,413,94,408,91,403,94,405,89, - 400,86,406,86,408,80],1,1,1,0,278,0,0,0,0,0,'1', - "000",[ -]), -box('black',404,84,412,92,0,1,0,279,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',408,83,'Courier',0,17,1,1,0,1,112,14,280,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',408,80,'Courier',0,17,1,1,0,1,0,14,281,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -282,0,0,[ -]). -group([ -polygon('black',11,[ - 424,304,426,310,432,310,427,313,429,318,424,315,419,318,421,313, - 416,310,422,310,424,304],1,1,1,0,288,0,0,0,0,0,'1', - "000",[ -]), -box('black',420,308,428,316,0,1,0,289,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',424,307,'Courier',0,17,1,1,0,1,112,14,290,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',424,304,'Courier',0,17,1,1,0,1,0,14,291,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -292,0,0,[ -]). diff --git a/doc/html/Chunk_f2.gif b/doc/html/Chunk_f2.gif deleted file mode 100644 index 68f9433..0000000 Binary files a/doc/html/Chunk_f2.gif and /dev/null differ diff --git a/doc/html/Chunk_f2.obj b/doc/html/Chunk_f2.obj deleted file mode 100644 index 7361c1c..0000000 --- a/doc/html/Chunk_f2.obj +++ /dev/null @@ -1,95 +0,0 @@ -%TGIF 3.0-p17 -state(0,33,100.000,0,0,0,16,1,9,1,1,6,1,1,0,1,0,'Courier',0,17,0,2,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -color_info(11,65535,0,[ - "magenta", 65535, 0, 65535, 65280, 0, 65280, 1, - "red", 65535, 0, 0, 65280, 0, 0, 1, - "green", 0, 65535, 0, 0, 65280, 0, 1, - "blue", 0, 0, 65535, 0, 0, 65280, 1, - "yellow", 65535, 65535, 0, 65280, 65280, 0, 1, - "pink", 65535, 49344, 52171, 65280, 49152, 51968, 1, - "cyan", 0, 65535, 65535, 0, 65280, 65280, 1, - "CadetBlue", 24415, 40606, 41120, 24320, 40448, 40960, 1, - "white", 65535, 65535, 65535, 65280, 65280, 65280, 1, - "black", 0, 0, 0, 0, 0, 0, 1, - "DarkSlateGray", 12079, 20303, 20303, 12032, 20224, 20224, 1 -]). -page(1,"",1). 
-group([ -box('black',192,416,512,544,0,1,0,22,0,0,0,0,0,'1',[ -]), -oval('black',192,384,512,448,0,1,1,23,0,0,0,0,0,'1',[ -]), -arc('black',0,1,1,0,192,512,352,544,192,544,512,544,0,320,64,11520,11520,24,0,0,8,3,0,0,0,'1','8','3',[ -]), -poly('black',2,[ - 192,416,192,544],0,1,1,25,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]), -poly('black',2,[ - 512,416,512,544],0,1,1,26,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]), -box('black',196,452,508,572,0,1,0,27,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',352,451,'Courier',0,17,1,1,0,1,112,14,28,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "HDF5 File", 1, 0, 0, -text('black',351,505,'Courier',0,17,1,1,0,1,63,14,29,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "HDF5 File"])) -]) -], -30,0,0,[ -]). -group([ -polygon('black',5,[ - 240,160,240,352,464,352,464,160,240,160],0,1,1,0,63,0,0,0,0,0,'1', - "00",[ -]), -box('black',254,164,450,348,0,1,0,64,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',352,163,'Courier',0,17,1,1,0,1,112,14,65,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Filter", 1, 0, 0, -text('black',351,242,'Courier',0,17,2,1,0,1,49,28,66,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Filter", - "Pipeine"])) -]) -], -62,0,0,[ -]). -group([ -polygon('black',13,[ - 304,85,304,107,336,107,336,128,368,128,368,107,400,107,400,85, - 368,85,368,64,336,64,336,85,304,85],0,1,1,0,103,0,0,0,0,0,'1', - "0000",[ -]), -box('black',307,68,397,124,0,1,0,104,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',352,67,'Courier',0,17,1,1,0,1,112,14,105,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Modify Bytes", 1, 0, 0, -text('black',352,89,'Courier',0,17,1,1,0,1,84,14,106,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Modify Bytes"])) -]) -], -107,0,0,[ -]). -box('black',176,48,528,592,0,1,1,143,0,0,0,0,0,'1',[ -]). -poly('black',4,[ - 256,416,256,128,256,96,304,96],1,7,1,168,1,0,2,0,22,9,0,0,0,'7','22','9', - "6",[ -]). -poly('black',4,[ - 400,96,448,96,448,128,448,416],1,7,1,173,1,0,2,0,22,9,0,0,0,'7','22','9', - "6",[ -]). -text('black',432,128,'Courier',0,17,1,0,0,1,35,14,312,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Chunk"]). -text('black',240,368,'Courier',0,17,1,0,0,1,35,14,314,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Chunk"]). diff --git a/doc/html/Chunk_f3.gif b/doc/html/Chunk_f3.gif deleted file mode 100644 index e6e8457..0000000 Binary files a/doc/html/Chunk_f3.gif and /dev/null differ diff --git a/doc/html/Chunk_f4.gif b/doc/html/Chunk_f4.gif deleted file mode 100644 index 76f0994..0000000 Binary files a/doc/html/Chunk_f4.gif and /dev/null differ diff --git a/doc/html/Chunk_f5.gif b/doc/html/Chunk_f5.gif deleted file mode 100644 index 3b12174..0000000 Binary files a/doc/html/Chunk_f5.gif and /dev/null differ diff --git a/doc/html/Chunk_f6.gif b/doc/html/Chunk_f6.gif deleted file mode 100644 index 616946d..0000000 Binary files a/doc/html/Chunk_f6.gif and /dev/null differ diff --git a/doc/html/Chunk_f6.obj b/doc/html/Chunk_f6.obj deleted file mode 100644 index 2b2f371..0000000 --- a/doc/html/Chunk_f6.obj +++ /dev/null @@ -1,107 +0,0 @@ -%TGIF 3.0-p17 -state(0,33,100.000,0,0,0,8,1,9,1,1,0,1,1,0,1,1,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). 
-color_info(11,65535,0,[ - "magenta", 65535, 0, 65535, 65280, 0, 65280, 1, - "red", 65535, 0, 0, 65280, 0, 0, 1, - "green", 0, 65535, 0, 0, 65280, 0, 1, - "blue", 0, 0, 65535, 0, 0, 65280, 1, - "yellow", 65535, 65535, 0, 65280, 65280, 0, 1, - "pink", 65535, 49344, 52171, 65280, 49152, 51968, 1, - "cyan", 0, 65535, 65535, 0, 65280, 65280, 1, - "CadetBlue", 24415, 40606, 41120, 24320, 40448, 40960, 1, - "white", 65535, 65535, 65535, 65280, 65280, 65280, 1, - "black", 0, 0, 0, 0, 0, 0, 1, - "DarkSlateGray", 12079, 20303, 20303, 12032, 20224, 20224, 1 -]). -page(1,"",1). -polygon('black',5,[ - 128,256,256,256,256,320,128,320,128,256],5,1,1,0,26,5,0,0,0,0,'1', - "00",[ -]). -polygon('black',7,[ - 256,128,256,256,128,256,128,192,192,192,192,128,256,128],5,1,1,0,25,5,0,0,0,0,'1', - "00",[ -]). -polygon('black',7,[ - 128,64,256,64,256,128,192,128,192,192,128,192,128,64],5,1,1,0,24,5,0,0,0,0,'1', - "00",[ -]). -box('black',128,64,256,320,0,3,1,22,0,0,0,0,0,'3',[ -]). -text('black',192,96,'Courier',0,17,1,1,0,1,49,14,34,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Strip 1"]). -text('black',224,160,'Courier',0,17,1,1,0,1,49,14,40,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Strip 2"]). -text('black',192,272,'Courier',0,17,1,1,0,1,49,14,46,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Strip 3"]). -polygon('black',5,[ - 448,256,576,256,576,320,448,320,448,256],5,1,1,0,59,5,0,0,0,0,'1', - "00",[ -]). -polygon('black',7,[ - 576,128,576,256,448,256,448,192,512,192,512,128,576,128],5,1,1,0,60,5,0,0,0,0,'1', - "00",[ -]). -polygon('black',7,[ - 448,64,576,64,576,128,512,128,512,192,448,192,448,64],5,1,1,0,61,5,0,0,0,0,'1', - "00",[ -]). -box('black',448,64,576,320,0,3,1,62,0,0,0,0,0,'3',[ -]). -text('black',512,96,'Courier',0,17,1,1,0,1,49,14,63,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Strip 1"]). -text('black',544,160,'Courier',0,17,1,1,0,1,49,14,64,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Strip 2"]). -text('black',512,272,'Courier',0,17,1,1,0,1,49,14,65,0,11,3,2,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Strip 3"]). -text('black',192,32,'Courier',0,17,1,1,0,1,28,14,68,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "FILE"]). -text('black',512,32,'Courier',0,17,1,1,0,1,42,14,70,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "MEMORY"]). -group([ -polygon('black',6,[ - 320,160,320,208,384,208,416,184,384,160,320,160],0,3,1,0,72,0,0,0,0,0,'3', - "00",[ -]), -box('black',324,164,388,204,0,3,0,73,0,0,0,0,0,'3',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',356,162,'Courier',0,17,1,1,0,1,112,14,74,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "TCONV", 1, 0, 0, -text('black',355,177,'Courier',0,17,1,1,0,1,35,14,75,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "TCONV"])) -]) -], -76,0,0,[ -]). -poly('black',5,[ - 256,96,288,96,320,96,320,128,320,160],1,7,1,87,1,0,5,0,22,9,0,0,0,'7','22','9', - "70",[ -]). -poly('black',2,[ - 256,184,320,184],1,7,1,88,1,0,5,0,22,9,0,0,0,'7','22','9', - "0",[ -]). -poly('black',5,[ - 256,288,288,288,320,288,320,256,320,208],1,7,1,89,1,0,5,0,22,9,0,0,0,'7','22','9', - "70",[ -]). -poly('black',5,[ - 400,160,400,128,400,96,432,96,448,96],1,7,1,92,1,0,5,0,22,9,0,0,0,'7','22','9', - "70",[ -]). -poly('black',2,[ - 416,184,512,184],1,7,1,93,1,0,5,0,22,9,0,0,0,'7','22','9', - "0",[ -]). -poly('black',5,[ - 400,208,400,256,400,288,432,288,448,288],1,7,1,94,1,0,5,0,22,9,0,0,0,'7','22','9', - "70",[ -]). -box('black',96,0,608,352,0,1,1,99,0,0,0,0,0,'1',[ -]). 
diff --git a/doc/html/Chunking.html b/doc/html/Chunking.html deleted file mode 100644 index 3738d9a..0000000 --- a/doc/html/Chunking.html +++ /dev/null @@ -1,313 +0,0 @@ - - - - Dataset Chunking - - - - - - - - - - -
-
- - - -
-
-
-

Dataset Chunking Issues

- -

Table of Contents

- - - -

1. Introduction

- - -

Chunking refers to a storage layout where a dataset is - partitioned into fixed-size multi-dimensional chunks. The - chunks cover the dataset but the dataset need not be an integral - number of chunks. If no data is ever written to a chunk then - that chunk isn't allocated on disk. Figure 1 shows a 25x48 - element dataset covered by nine 10x20 chunks and 11 data points - written to the dataset. No data was written to the region of - the dataset covered by three of the chunks so those chunks were - never allocated in the file -- the other chunks are allocated at - independent locations in the file and written in their entirety. - -


Figure 1
- -
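A dataset laid out as in Figure 1 could be created roughly as follows. This sketch is not part of the original text; it assumes an already-open file identifier file and omits error checking.

    hsize_t dims[2]  = {25, 48};    /* dataset extent, as in Figure 1      */
    hsize_t chunk[2] = {10, 20};    /* nine 10x20 chunks cover the dataset */
    hid_t   space, dcpl, dset;
    space = H5Screate_simple (2, dims, NULL);
    dcpl  = H5Pcreate (H5P_DATASET_CREATE);
    H5Pset_chunk (dcpl, 2, chunk);
    dset  = H5Dcreate (file, "chunked", H5T_NATIVE_INT, space, dcpl);

Chunks are then allocated in the file only as data is actually written to them.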

The HDF5 library treats chunks as atomic objects -- disk I/O is - always in terms of complete chunks(1). This - allows data filters to be defined by the application to perform - tasks such as compression, encryption, checksumming, - etc. on entire chunks. As shown in Figure 2, if - H5Dwrite() touches only a few bytes of the chunk, - the entire chunk is read from the file, the data passes upward - through the filter pipeline, the few bytes are modified, the - data passes downward through the filter pipeline, and the entire - chunk is written back to the file. - -


Figure 2
- -

2. The Raw Data Chunk Cache

- -

It's obvious from Figure 2 that calling H5Dwrite() - many times from the application would result in poor performance - even if the data being written all falls within a single chunk. - A raw data chunk cache layer was added between the top of the - filter stack and the bottom of the byte modification layer(2). By default, the chunk cache will store 521 - chunks or 1MB of data (whichever is less) but these values can - be modified with H5Pset_cache(). - -

The preemption policy for the cache favors certain chunks and - tries not to preempt them. - -

    -
  • Chunks that have been accessed frequently in the near past - are favored. -
  • A chunk which has just entered the cache is favored. -
  • A chunk which has been completely read or completely written - but not partially read or written is penalized according to - some application specified weighting between zero and one. -
  • A chunk which is larger than the maximum cache size is not - eligible for caching. -
- -

3. Cache Efficiency

- -

Now for some real numbers... A 2000x2000 element dataset is - created and covered by a 20x20 array of chunks (each chunk is 100x100 - elements). The raw data cache is adjusted to hold at most 25 chunks by - setting the maximum number of bytes to 25 times the chunk size in - bytes. Then the application creates a square, two-dimensional memory - buffer and uses it as a window into the dataset, first reading and then - rewriting in row-major order by moving the window across the dataset - (the read and write tests both start with a cold cache). - -

The measure of efficiency in Figure 3 is the number of bytes requested - by the application divided by the number of bytes transferred from the - file. There are at least a couple of ways to get an estimate of the cache - performance: one way is to turn on cache - debugging and look at the number of cache misses. A more accurate - and specific way is to register a data filter whose sole purpose is to - count the number of bytes that pass through it (that's the method used - below). -


Figure 3
- -

The read efficiency is less than one for two reasons: collisions in the - cache are handled by preempting one of the colliding chunks, and the - preemption algorithm occasionally preempts a chunk which hasn't been - referenced for a long time but is about to be referenced in the near - future. - -

The write test results in lower efficiency for most window - sizes because HDF5 is unaware that the application is about to - overwrite the entire dataset and must read in most chunks before - modifying parts of them. - -

There is a simple way to improve efficiency for this example. - It turns out that any chunk that has been completely read or - written is a good candidate for preemption. If we increase the - penalty for such chunks from the default 0.75 to the maximum - 1.00 then efficiency improves. - -


Figure 4
- -
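The adjustment described above might look roughly like the following, using the H5Pset_cache form documented in the Data Caching chapter of this guide. The byte limit mirrors the 25-chunk budget of this test, assuming 4-byte elements; the property-list name and numbers are illustrative.

    hid_t fapl = H5Pcreate (H5P_FILE_ACCESS);
    /* room for 25 chunks of 100x100 4-byte elements; w0 raised from 0.75 to 1.00 */
    H5Pset_cache (fapl, 2000, 25*100*100*4, 1.0);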

The read efficiency is still less than one because of - collisions in the cache. The number of collisions can often be - reduced by increasing the number of slots in the cache. Figure - 5 shows what happens when the maximum number of slots is - increased by an order of magnitude from the default (this change - has no major effect on memory used by the test since the byte - limit was not increased for the cache). - -


Figure 5
- -

Although the application eventually overwrites every chunk - completely, the library has no way of knowing this beforehand - since most calls to H5Dwrite() modify only a - portion of any given chunk. Therefore, the first modification of - a chunk will cause the chunk to be read from disk into the chunk - buffer through the filter pipeline. Eventually HDF5 might - contain a dataset transfer property that can turn off this read - operation, resulting in write efficiency equal to read - efficiency. - 

4. Fragmentation

- -

Even if the application transfers the entire dataset contents with a - single call to H5Dread() or H5Dwrite() it's - possible the request will be broken into smaller, more manageable - pieces by the library. This is almost certainly true if the data - transfer includes a type conversion. - -


Figure 6
- -

By default the strip size is 1MB but it can be changed by calling - H5Pset_buffer(). - - -
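For example, the transfer buffer could be enlarged on a dataset transfer property list along these lines. This is a sketch only: the 8MB figure and the surrounding identifiers (dset, memtype, memspace, filespace, buf) are assumptions, passing NULL lets the library allocate the buffers itself, and the exact type of the size argument has varied between releases.

    hid_t xfer = H5Pcreate (H5P_DATASET_XFER);
    H5Pset_buffer (xfer, 8*1024*1024, NULL, NULL);   /* 8MB strips instead of the 1MB default */
    H5Dread (dset, memtype, memspace, filespace, xfer, buf);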

5. File Storage Overhead

- -

The chunks of the dataset are allocated at independent - locations throughout the HDF5 file and a B-tree maps chunk - N-dimensional addresses to file addresses. The more chunks that - are allocated for a dataset the larger the B-tree. Large B-trees - have two disadvantages: - -

    -
  • The file storage overhead is higher and more disk I/O is - required to traverse the tree from root to leaves. -
  • The increased number of B-tree nodes will result in higher - contention for the meta data cache. -
- -

There are three ways to reduce the number of B-tree nodes. The - obvious way is to reduce the number of chunks by choosing a larger chunk - size (doubling the chunk size will cut the number of B-tree nodes in - half). Another method is to adjust the split ratios for the B-tree by - calling H5Pset_btree_ratios(), but this method typically - results in only a slight improvement over the default settings. - Finally, the out-degree of each node can be increased by calling - H5Pset_istore_k() (increasing the out degree actually - increases file overhead while decreasing the number of nodes). - - -
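A rough sketch of the latter two approaches follows; the specific values are illustrative, not recommendations. H5Pset_istore_k applies to a file creation property list, while H5Pset_btree_ratios applies to a dataset transfer property list.

    hid_t fcpl = H5Pcreate (H5P_FILE_CREATE);
    H5Pset_istore_k (fcpl, 64);                  /* larger out-degree for chunk B-tree nodes */

    hid_t dxpl = H5Pcreate (H5P_DATASET_XFER);
    H5Pset_btree_ratios (dxpl, 0.1, 0.5, 1.0);   /* adjust the left/middle/right split ratios */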

6. Chunk Compression

- -

Dataset chunks can be compressed through the use of filters. - See the chapter “Filters in HDF5.” - -

Reading and rewriting compressed chunked data can result in holes - in an HDF5 file. Over time, such holes can increase the - file size enough to impair application or library performance - when working with that file. See - “Freespace Management” - in the chapter - “Performance Analysis and Issues.” - 


- -

Footnote 1: Parallel versions of the library - can access individual bytes of a chunk when the underlying file - uses MPI-IO. - -

Footnote 2: The raw data chunk cache was - added before the second alpha release.

- - -
-
- - - -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 2 August 2001 - - - - - diff --git a/doc/html/CodeReview.html b/doc/html/CodeReview.html deleted file mode 100644 index 213cbbe..0000000 --- a/doc/html/CodeReview.html +++ /dev/null @@ -1,300 +0,0 @@ - - - - Code Review - - -

Code Review 1

- -

Some background...

-

This is one of the functions exported from the - H5B.c file that implements a B-link-tree class - without worrying about concurrency yet (thus the `Note:' in the - function prologue). The H5B.c file provides the - basic machinery for operating on generic B-trees, but it isn't - much use by itself. Various subclasses of the B-tree (like - symbol tables or indirect storage) provide their own interface - and back end to this function. For instance, - H5G_stab_find() takes a symbol table OID and a name - and calls H5B_find() with an appropriate - udata argument that eventually gets passed to the - symbol-table subclass's found callback. - 

- 1 /*-------------------------------------------------------------------------
- 2  * Function:    H5B_find
- 3  *
- 4  * Purpose:     Locate the specified information in a B-tree and return
- 5  *              that information by filling in fields of the caller-supplied
- 6  *              UDATA pointer depending on the type of leaf node
- 7  *              requested.  The UDATA can point to additional data passed
- 8  *              to the key comparison function.
- 9  *
-10  * Note:        This function does not follow the left/right sibling
-11  *              pointers since it assumes that all nodes can be reached
-12  *              from the parent node.
-13  *
-14  * Return:      Success:        SUCCEED if found, values returned through the
-15  *                              UDATA argument.
-16  *
-17  *              Failure:        FAIL if not found, UDATA is undefined.
-18  *
-19  * Programmer:  Robb Matzke
-20  *              matzke@llnl.gov
-21  *              Jun 23 1997
-22  *
-23  * Modifications:
-24  *
-25  *-------------------------------------------------------------------------
-26  */
-27 herr_t
-28 H5B_find (H5F_t *f, const H5B_class_t *type, const haddr_t *addr, void *udata)
-29 {
-30    H5B_t        *bt=NULL;
-31    intn         idx=-1, lt=0, rt, cmp=1;
-32    int          ret_value = FAIL;
-    
- -

All pointer arguments are initialized when defined. I don't - worry much about non-pointers because it's usually obvious when - the value isn't initialized. - -

-33 
-34    FUNC_ENTER (H5B_find, NULL, FAIL);
-35 
-36    /*
-37     * Check arguments.
-38     */
-39    assert (f);
-40    assert (type);
-41    assert (type->decode);
-42    assert (type->cmp3);
-43    assert (type->found);
-44    assert (addr && H5F_addr_defined (addr));
-    
- -

I use assert to check invariant conditions. At - this level of the library, none of these assertions should fail - unless something is majorly wrong. The arguments should have - already been checked by higher layers. It also provides - documentation about what arguments might be optional. - -

-45    
-46    /*
-47     * Perform a binary search to locate the child which contains
-48     * the thing for which we're searching.
-49     */
-50    if (NULL==(bt=H5AC_protect (f, H5AC_BT, addr, type, udata))) {
-51       HGOTO_ERROR (H5E_BTREE, H5E_CANTLOAD, FAIL);
-52    }
-    
- -

You'll see this quite often in the low-level stuff and it's - documented in the H5AC.c file. The - H5AC_protect ensures that the B-tree node (which - inherits from the H5AC package) whose OID is addr - is locked into memory for the duration of this function (see the - H5AC_unprotect on line 90). Most likely, if this - node has been accessed in the not-too-distant past, it will still - be in memory and the H5AC_protect is almost a - no-op. If cache debugging is compiled in, then the protect also - prevents other parts of the library from accessing the node - while this function is protecting it, so this function can allow - the node to be in an inconsistent state while calling other - parts of the library. - 

The alternative is to call the slightly cheaper - H5AC_find and assume that the pointer it returns is - valid only until some other library function is called, but - since we're accessing the pointer throughout this function, I - chose to use the simpler protect scheme. All protected objects - must be unprotected before the file is closed, thus the - use of HGOTO_ERROR instead of - HRETURN_ERROR. - 

-53    rt = bt->nchildren;
-54 
-55    while (lt<rt && cmp) {
-56       idx = (lt + rt) / 2;
-57       if (H5B_decode_keys (f, bt, idx)<0) {
-58          HGOTO_ERROR (H5E_BTREE, H5E_CANTDECODE, FAIL);
-59       }
-60 
-61       /* compare */
-62       if ((cmp=(type->cmp3)(f, bt->key[idx].nkey, udata,
-63                             bt->key[idx+1].nkey))<0) {
-64          rt = idx;
-65       } else {
-66          lt = idx+1;
-67       }
-68    }
-69    if (cmp) {
-70       HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
-71    }
-    
- -

Code is arranged in paragraphs with a comment starting each - paragraph. The previous paragraph is a standard binary search - algorithm. The (type->cmp3)() is an indirect - function call into the subclass of the B-tree. All indirect - function calls have the function part in parentheses to document - that it's indirect (quite obvious here, but not so obvious when - the function is a variable). - -

It's also my standard practice to have side effects in - conditional expressions because I can write code faster and it's - more apparent to me what the condition is testing. But if I - have an assignment in a conditional expr, then I use an extra - set of parens even if they're not required (usually they are, as - in this case) so it's clear that I meant = instead - of ==. - -

-72 
-73    /*
-74     * Follow the link to the subtree or to the data node.
-75     */
-76    assert (idx>=0 && idx<bt->nchildren);
-77    if (bt->level > 0) {
-78       if ((ret_value = H5B_find (f, type, bt->child+idx, udata))<0) {
-79          HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
-80       }
-81    } else {
-82       ret_value = (type->found)(f, bt->child+idx, bt->key[idx].nkey,
-83                                 udata, bt->key[idx+1].nkey);
-84       if (ret_value<0) {
-85          HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
-86       }
-87    }
-    
- -

Here I broke the "side effect in conditional" rule, which I - sometimes do if the expression is so long that the - <0 gets lost at the end. Another thing to note is - that success/failure is always determined by comparing with zero - instead of SUCCEED or FAIL. I do this - because occasionally one might want to return other meaningful - values (always non-negative) or distinguish between various types of - failure (always negative). - 

-88 
-89 done:
-90    if (bt && H5AC_unprotect (f, H5AC_BT, addr, bt)<0) {
-91       HRETURN_ERROR (H5E_BTREE, H5E_PROTECT, FAIL);
-92    }
-93    FUNC_LEAVE (ret_value);
-94 }
-    
- -

For lack of a better way to handle errors during error cleanup, - I just call the HRETURN_ERROR macro even though it - will make the error stack not quite right. I also use short - circuiting boolean operators instead of nested if - statements since that's standard C practice. - -

Code Review 2

- - -

The following code is an API function from the H5F package... - -

- 1 /*--------------------------------------------------------------------------
- 2  NAME
- 3     H5Fflush
- 4 
- 5  PURPOSE
- 6     Flush all cached data to disk and optionally invalidates all cached
- 7     data.
- 8 
- 9  USAGE
-10     herr_t H5Fflush(fid, invalidate)
-11         hid_t fid;              IN: File ID of file to close.
-12         hbool_t invalidate;     IN: Invalidate all of the cache?
-13 
-14  ERRORS
-15     ARGS      BADTYPE       Not a file atom. 
-16     ATOM      BADATOM       Can't get file struct. 
-17     CACHE     CANTFLUSH     Flush failed. 
-18 
-19  RETURNS
-20     SUCCEED/FAIL
-21 
-22  DESCRIPTION
-23         This function flushes all cached data to disk and, if INVALIDATE
-24     is non-zero, removes cached objects from the cache so they must be
-25     re-read from the file on the next access to the object.
-26 
-27  MODIFICATIONS:
-28 --------------------------------------------------------------------------*/
-    
- -

An API prologue is used for each API function instead of my - normal function prologue. I use the prologue from Code Review 1 - for non-API functions because it's more suited to C programmers, - it requires less work to keep it synchronized with the code, and - I have better editing tools for it. - -

-29 herr_t
-30 H5Fflush (hid_t fid, hbool_t invalidate)
-31 {
-32    H5F_t        *file = NULL;
-33 
-34    FUNC_ENTER (H5Fflush, H5F_init_interface, FAIL);
-35    H5ECLEAR;
-    
- -

API functions are never called internally, therefore I always - clear the error stack before doing anything. - -

-36 
-37    /* check arguments */
-38    if (H5_FILE!=H5Aatom_group (fid)) {
-39       HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL); /*not a file atom*/
-40    }
-41    if (NULL==(file=H5Aatom_object (fid))) {
-42       HRETURN_ERROR (H5E_ATOM, H5E_BADATOM, FAIL); /*can't get file struct*/
-43    }
-    
- -

If something is wrong with the arguments then we raise an - error. We never assert arguments at this level. - We also convert atoms to pointers since atoms are really just a - pointer-hiding mechanism. Functions that can be called - internally always have pointer arguments instead of atoms - because (1) then they don't have to always convert atoms to - pointers, and (2) the various pointer data types provide more - documentation and type checking than just an hid_t - type. - -

-44 
-45    /* do work */
-46    if (H5F_flush (file, invalidate)<0) {
-47       HRETURN_ERROR (H5E_CACHE, H5E_CANTFLUSH, FAIL); /*flush failed*/
-48    }
-    
- -

An internal version of the function does the real work. That - internal version calls assert to check/document - its arguments and can be called from other library functions. - 

-49 
-50    FUNC_LEAVE (SUCCEED);
-51 }
-    
- -
-
Robb Matzke
- - -Last modified: Mon Nov 10 15:33:33 EST 1997 - - - diff --git a/doc/html/Coding.html b/doc/html/Coding.html deleted file mode 100644 index dbf55bf..0000000 --- a/doc/html/Coding.html +++ /dev/null @@ -1,300 +0,0 @@ - - - HDF5 Naming Scheme - - - - - -

-
HDF5 Naming Scheme for

- -

-

-

-

- Authors: - Quincey Koziol and - - Robb Matzke - -
-
    - - FILES - - -
      - -
    • Source files are named according to the package they contain (see - below). All files will begin with `H5' so we can stuff our - object files into someone else's library and not worry about file - name conflicts. -

      For Example: - -

      H5.c -- "Generic" library functions -
      -
      H5B.c -- B-link tree functions - -

      -

    • If a package is in more than one file, then another name is tacked - on. It's all lower case with no underscores or hyphens. -

      For Example: - -

      H5F.c -- the file for this package -
      -
      H5Fstdio.c -- stdio functions (just an example) -
      -
      H5Ffcntl.c -- fcntl functions (just an example) - -

      -

    • Each package file has a header file of API stuff (unless there is - no API component to the package) -

      For Example: - -

      H5F.h -- things an application would see. -

      - and a header file of private stuff - -

      -

      H5Fprivate.h -- things an application wouldn't see. The - private header includes the public header. - -

      - and a header for private prototypes - -

      -

      H5Fproto.h -- prototypes for internal functions. - -

      - By splitting the prototypes into separate include files we don't - have to recompile everything when just one function prototype - changes. - -

    • The main API header file is `hdf5.h' and it includes each of the - public header files but none of the private header files. Or the - application can include just the public header files it needs. - -
    • There is no main private or prototype header file because it - prevents make from being efficient. Instead, each source file - includes only the private header and prototype files it needs - (first all the private headers, then all the private prototypes). - -
    • Header files should include everything they need and nothing more. - -
    -

    - - PACKAGES - - -

    -Names exported beyond function scope begin with `H5' followed by zero, -one, or two upper-case letters that describe the class of object. -This prefix is the package name. The implementation of packages -doesn't necessarily have to map 1:1 to the source files. -

    - -

    H5 -- library functions -
    -
    H5A -- atoms -
    -
    H5AC -- cache -
    -
    H5B -- B-link trees -
    -
    H5D -- datasets -
    -
    H5E -- error handling -
    -
    H5F -- files -
    -
    H5G -- groups -
    -
    H5M -- meta data -
    -
    H5MM -- core memory management -
    -
    H5MF -- file memory management -
    -
    H5O -- object headers -
    -
    H5P -- Property Lists -
    -
    H5S -- dataspaces -
    -
    H5R -- relationships -
    -
    H5T -- datatype - -

    -Each package implements a single main class of object (e.g., the H5B -package implements B-link trees). The main data type of a package is -the package name followed by `_t'. -

    - -

    H5F_t -- HDF5 file type -
    -
    H5B_t -- B-link tree data type - -

    - -Not all packages implement a data type (H5, H5MF) and some -packages provide access to a preexisting data type (H5MM, H5S). -

    - - - PUBLIC vs PRIVATE - -

    -If the symbol is for internal use only, then the package name is -followed by an underscore and the rest of the name. Otherwise, the -symbol is part of the API and there is no underscore between the -package name and the rest of the name. -

    - -

    H5Fopen -- an API function. -
    -
    H5B_find -- an internal function. - -

    -For functions, this is important because the API functions never pass -pointers around (they use atoms instead for hiding the implementation) -and they perform stringent checks on their arguments. Internal -functions, on the other hand, check arguments with assert(). -

    -Data types like H5B_t carry no information about whether the type is -public or private since it doesn't matter. - -

    - - - INTEGRAL TYPES - -

    -Integral fixed-point type names are an optional `u' followed by `int' -followed by the size in bits (8, 16, -32, or 64). There is no trailing `_t' because these are common -enough and follow their own naming convention. -

    -

    -
    hbool_t -- boolean values (BTRUE, BFALSE, BFAIL) -
    -
    int8 -- signed 8-bit integers -
    -
    uint8 -- unsigned 8-bit integers -
    -
    int16 -- signed 16-bit integers -
    -
    uint16 -- unsigned 16-bit integers -
    -
    int32 -- signed 32-bit integers -
    -
    uint32 -- unsigned 32-bit integers -
    -
    int64 -- signed 64-bit integers -
    -
    uint64 -- unsigned 64-bit integers -
    -
    intn -- "native" integers -
    -
    uintn -- "native" unsigned integers - -

    -

    - - OTHER TYPES - - -

    - -Other data types are always followed by `_t'. -

    -

    -
    H5B_key_t-- additional data type used by H5B package. -

    -

    - -However, if the name is so common that it's used almost everywhere, -then we make an alias for it by removing the package name and leading -underscore and replacing it with an `h' (the main datatype for a -package already has a short enough name, so we don't have aliases for -them). -

    -

    -
    typedef H5E_err_t herr_t; -

    -

    - - GLOBAL VARIABLES - -

    -Global variables include the package name and end with `_g'. -

    -

    -
    H5AC_methods_g -- global variable in the H5AC package. -

    -

    - - - - -MACROS, PREPROCESSOR CONSTANTS, AND ENUM MEMBERS - - -

    -Same rules as other symbols except the name is all upper case. There -are a few exceptions:
    -

      -
    • Constants and macros defined on a system that is deficient: -

      -
      MIN(x,y), MAX(x,y) and their relatives -

      - -
    • Platform constants : -

      - No naming scheme; determined by OS and compiler.
      - These appear only in one header file anyway. -

      -

    • Feature test constants (?)
      - Always start with `HDF5_HAVE_' like HDF5_HAVE_STDARG_H for a - header file, or HDF5_HAVE_DEV_T for a data type, or - HDF5_HAVE_DIV for a function. -
    -

    - -

-

-

-
- This file /hdf3/web/hdf/internal/HDF_standard/HDF5.coding_standard.html is - maintained by Elena Pourmal - epourmal@ncsa.uiuc.edu . -
-

-

- Last modified August 5, 1997 -
- -
- - - diff --git a/doc/html/Copyright.html b/doc/html/Copyright.html deleted file mode 100644 index fdc8368..0000000 --- a/doc/html/Copyright.html +++ /dev/null @@ -1,121 +0,0 @@ - - - - - HDF5 Copyright Notice - - - - - - - -
- -

Copyright Notice and Statement for -
-NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities

-
-

- - -NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities -
-Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 by -the Board of Trustees of the University of Illinois -
-All rights reserved. -

- -Contributors: National Center for Supercomputing Applications (NCSA) at -the University of Illinois at Urbana-Champaign (UIUC), Lawrence Livermore -National Laboratory (LLNL), Sandia National Laboratories (SNL), Los Alamos -National Laboratory (LANL), Jean-loup Gailly and Mark Adler (gzip library). -

- -Redistribution and use in source and binary forms, with or without -modification, are permitted for any purpose (including commercial purposes) -provided that the following conditions are met: -

- -

    -
  1. Redistributions of source code must retain the above copyright notice, -this list of conditions, and the following disclaimer. - -
  2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions, and the following disclaimer in the -documentation and/or materials provided with the distribution. - -
  3. In addition, redistributions of modified forms of the source or binary -code must carry prominent notices stating that the original code was -changed and the date of the change. - -
  4. All publications or advertising materials mentioning features or use of -this software are asked, but not required, to acknowledge that it was -developed by the National Center for Supercomputing Applications at the -University of Illinois at Urbana-Champaign and to credit the contributors. - -
  5. Neither the name of the University nor the names of the Contributors may -be used to endorse or promote products derived from this software without -specific prior written permission from the University or the Contributors, -as appropriate for the name(s) to be used. - -
  6. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND THE CONTRIBUTORS "AS IS" -WITH NO WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED. In no event -shall the University or the Contributors be liable for any damages suffered -by the users arising out of the use of this software, even if advised of -the possibility of such damage. - -
- - - - -
-Portions of HDF5 were developed with support from the University of -California, Lawrence Livermore National Laboratory (UC LLNL). -The following statement applies to those portions of the product -and must be retained in any redistribution of source code, binaries, -documentation, and/or accompanying materials: - - - This work was partially produced at the University of California, - Lawrence Livermore National Laboratory (UC LLNL) under contract no. - W-7405-ENG-48 (Contract 48) between the U.S. Department of Energy - (DOE) and The Regents of the University of California (University) - for the operation of UC LLNL. -

- DISCLAIMER: - This work was prepared as an account of work sponsored by an agency - of the United States Government. Neither the United States - Government nor the University of California nor any of their - employees, makes any warranty, express or implied, or assumes any - liability or responsibility for the accuracy, completeness, or - usefulness of any information, apparatus, product, or process - disclosed, or represents that its use would not infringe privately- - owned rights. Reference herein to any specific commercial products, - process, or service by trade name, trademark, manufacturer, or - otherwise, does not necessarily constitute or imply its endorsement, - recommendation, or favoring by the United States Government or the - University of California. The views and opinions of authors - expressed herein do not necessarily state or reflect those of the - United States Government or the University of California, and shall - not be used for advertising or product endorsement purposes. -

- -
- -
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - - - diff --git a/doc/html/Datasets.html b/doc/html/Datasets.html deleted file mode 100644 index eca195d..0000000 --- a/doc/html/Datasets.html +++ /dev/null @@ -1,954 +0,0 @@ - - - - Dataset Interface (H5D) - - - - - - - - - - -
-
- - - -
-
-
-

The Dataset Interface (H5D)

- -

1. Introduction

- -

The purpose of the dataset interface is to provide a mechanism - to describe properties of datasets and to transfer data between - memory and disk. A dataset is composed of a collection of raw - data points and four classes of meta data to describe the data - points. The interface is hopefully designed in such a way as to - allow new features to be added without disrupting current - applications that use the dataset interface. - -

The four classes of meta data are: - -

-
Constant Meta Data -
Meta data that is created when the dataset is created and - exists unchanged for the life of the dataset. For instance, - the datatype of stored array elements is defined when the - dataset is created and cannot be subsequently changed. - -
Persistent Meta Data -
Meta data that is an integral and permanent part of a - dataset but can change over time. For instance, the size in - any dimension can increase over time if such an increase is - allowed when the dataset was created. - -
Memory Meta Data -
Meta data that exists to describe how raw data is organized - in the application's memory space. For instance, the data - type of elements in an application array might not be the same - as the datatype of those elements as stored in the HDF5 file. - -
Transport Meta Data -
Meta data that is used only during the transfer of raw data - from one location to another. For instance, the number of - processes participating in a collective I/O request or hints - to the library to control caching of raw data. -
- -

Each of these classes of meta data is handled differently by - the library although the same API might be used to create them. - For instance, the datatype exists as constant meta data and as - memory meta data; the same API (the H5T API) is - used to manipulate both pieces of meta data but they're handled - by the dataset API (the H5D API) in different - manners. - - - -

2. Storage Layout Properties

- -

The dataset API partitions these terms on three orthogonal axes - (layout, compression, and external storage) and uses a - dataset creation property list to hold the various - settings and pass them through the dataset interface. This is - similar to the way HDF5 files are created with a file creation - property list. A dataset creation property list is always - derived from the default dataset creation property list (use - H5Pcreate() to get a copy of the default property - list) by modifying properties with various - H5Pset_property() functions. - -

-
herr_t H5Pset_layout (hid_t plist_id, - H5D_layout_t layout) -
The storage layout is a piece of constant meta data that - describes what method the library uses to organize the raw - data on disk. The default layout is contiguous storage. - -

-
-
H5D_COMPACT    (Not yet implemented.) -
The raw data is presumably small and can be stored - directly in the object header. Such data is - non-extendible, non-compressible, non-sparse, and cannot - be stored externally. Most of these restrictions are - arbitrary but are enforced because of the small size of - the raw data. Storing data in this format eliminates the - disk seek/read request normally necessary to read raw - data. - -

-
H5D_CONTIGUOUS -
The raw data is large, non-extendible, non-compressible, - non-sparse, and can be stored externally. This is the - default value for the layout property. The term - large means that it may not be possible to hold - the entire dataset in memory. The non-compressibility is - a side effect of the data being large, contiguous, and - fixed-size at the physical level, which could cause - partial I/O requests to be extremely expensive if - compression were allowed. - -

-
H5D_CHUNKED -
The raw data is large and can be extended in any - dimension at any time (provided the data space also allows - the extension). It may be sparse at the chunk level (each - chunk is non-sparse, but there might only be a few chunks) - and each chunk can be compressed and/or stored externally. - A dataset is partitioned into chunks so each chunk is the - same logical size. The chunks are indexed by a B-tree and - are allocated on demand (although it might be useful to be - able to preallocate storage for parts of a chunked array - to reduce contention for the B-tree in a parallel - environment). The chunk size must be defined with - H5Pset_chunk(). - -

-
others... -
Other layout types may be defined later without breaking - existing code. However, to be able to correctly read or - modify data stored with one of these new layouts, the - application will need to be linked with a new version of - the library. This happens automatically on systems with - dynamic linking. -
-
- - -

Once the general layout is defined, the user can define - - properties of that layout. Currently, the only layout that has - user-settable properties is the H5D_CHUNKED layout, - which needs to know the dimensionality and chunk size. -

-
herr_t H5Pset_chunk (hid_t plist_id, int - ndims, hsize_t dim[]) -
This function defines the logical size of a chunk for chunked layout. The layout property is set to H5D_CHUNKED and the chunk size is set to dim. The number of elements in the dim array is the dimensionality, ndims. One need not call H5Pset_layout() when using this function since the chunked layout is implied.
- -

-

- - - - - -

Example: Chunked Storage

-

This example shows how a two-dimensional dataset - is partitioned into chunks. The library can manage file - memory by moving the chunks around, and each chunk could be - compressed. The chunks are allocated in the file on demand - when data is written to the chunk. -

- Chunked Storage -
- -

-hsize_t hsize[2] = {1000, 1000};
-hid_t plist = H5Pcreate (H5P_DATASET_CREATE);
-H5Pset_chunk (plist, 2, hsize);
-	      
-
-
- - -

Although it is most efficient if I/O requests are aligned on chunk - boundaries, this is not a constraint. The application can perform I/O - on any set of data points as long as the set can be described by the - data space. The set on which I/O is performed is called the - selection. - -

3. Compression Properties

- -

Chunked data storage - (see H5Pset_chunk) - allows data compression as defined by the function - H5Pset_deflate. - - - -

-

herr_t H5Pset_deflate (hid_t plist_id, - int level) -
int H5Pget_deflate (hid_t plist_id) -
These functions set or query the deflate level of - dataset creation property list plist_id. The - H5Pset_deflate() sets the compression method to - H5Z_DEFLATE and sets the compression level to - some integer between one and nine (inclusive). One results in - the fastest compression while nine results in the best - compression ratio. The default value is six if - H5Pset_deflate() isn't called. The - H5Pget_deflate() returns the compression level - for the deflate method, or negative if the method is not the - deflate method. - - -
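As a minimal sketch of how these calls combine (assuming hdf5.h is included), the fragment below builds a dataset creation property list that requests chunked layout with deflate compression; the 64x64 chunk size and compression level 6 are arbitrary illustrative choices, not values required by the library.

-hid_t dcpl;
-hsize_t chunk_dims[2] = {64, 64};
-
-dcpl = H5Pcreate (H5P_DATASET_CREATE);
-H5Pset_chunk (dcpl, 2, chunk_dims);   /* deflate requires chunked layout */
-H5Pset_deflate (dcpl, 6);             /* level 6 is the documented default */
-
-/* ... pass dcpl to H5Dcreate(), then ... */
-H5Pclose (dcpl);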

4. External Storage Properties

- -

Some storage formats may allow storage of data across a set of non-HDF5 files. Currently, only the H5D_CONTIGUOUS storage format allows external storage. A set of segments (offsets and sizes) in one or more files is defined as an external file list, or EFL, and the contiguous logical addresses of the data storage are mapped onto these segments.

-
herr_t H5Pset_external (hid_t plist, const - char *name, off_t offset, hsize_t - size) -
This function adds a new segment to the end of the external file list of the specified dataset creation property list. The segment begins at byte offset offset of file name and continues for size bytes. The space represented by this segment is adjacent to the space already represented by the external file list. The last segment in a file list may have the size H5F_UNLIMITED, in which case the external file may be of unlimited size and no more files can be added to the external file list.

-
int H5Pget_external_count (hid_t plist) -
Calling this function returns the number of segments in an - external file list. If the dataset creation property list has no - external data then zero is returned. - -

-
herr_t H5Pget_external (hid_t plist, unsigned - idx, size_t name_size, char *name, off_t - *offset, hsize_t *size) -
This is the counterpart for the H5Pset_external() - function. Given a dataset creation property list and a zero-based - index into that list, the file name, byte offset, and segment size are - returned through non-null arguments. At most name_size - characters are copied into the name argument which is not - null terminated if the file name is longer than the supplied name - buffer (this is similar to strncpy()). -
- -
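The query functions above can be used to walk an external file list. A small sketch, assuming plist is a dataset creation property list that already has external segments defined and that <stdio.h> is available for the printf:

-int i, nseg;
-char fname[64];
-off_t seg_offset;
-hsize_t seg_size;
-
-nseg = H5Pget_external_count (plist);
-for (i = 0; i < nseg; i++) {
-    /* retrieve the file name, byte offset, and size of segment i */
-    H5Pget_external (plist, i, sizeof fname, fname, &seg_offset, &seg_size);
-    printf ("segment %d: file %s, offset %ld, size %lu\n",
-            i, fname, (long)seg_offset, (unsigned long)seg_size);
-}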

-

- - - - - -

Example: Multiple Segments

-

This example shows how a contiguous, one-dimensional dataset - is partitioned into three parts and each of those parts is - stored in a segment of an external file. The top rectangle - represents the logical address space of the dataset - while the bottom rectangle represents an external file. -

- Multiple Segments -
- -

-plist = H5Pcreate (H5P_DATASET_CREATE);
-H5Pset_external (plist, "velocity.data", 3000, 1000);
-H5Pset_external (plist, "velocity.data", 0, 2500);
-H5Pset_external (plist, "velocity.data", 4500, 1500);
-	      
- -

One should note that the segments are defined in order of the - logical addresses they represent, not their order within the - external file. It would also have been possible to put the - segments in separate files. Care should be taken when setting - up segments in a single file since the library doesn't - automatically check for segments that overlap. -

-
- -

-

- - - - - -

Example: Multi-Dimensional

-

This example shows how a contiguous, two-dimensional dataset - is partitioned into three parts and each of those parts is - stored in a separate external file. The top rectangle - represents the logical address space of the dataset - while the bottom rectangles represent external files. -

- Multiple Dimensions -
- -

-plist = H5Pcreate (H5P_DATASET_CREATE);
-H5Pset_external (plist, "scan1.data", 0, 24);
-H5Pset_external (plist, "scan2.data", 0, 24);
-H5Pset_external (plist, "scan3.data", 0, 16);
-	      
- -

The library maps the multi-dimensional array onto a linear - address space like normal, and then maps that address space - into the segments defined in the external file list. -

-
- -

The segments of an external file can exist beyond the end of the file. The library reads that part of a segment as zeros. When writing to a segment that exists beyond the end of a file, the file is automatically extended. Using this feature, one can create a segment (or set of segments) which is larger than the current size of the dataset, which allows the dataset to be extended at a future time (provided the data space also allows the extension).

All referenced external data files must exist before performing raw - data I/O on the dataset. This is normally not a problem since those - files are being managed directly by the application, or indirectly - through some other library. - - -

5. Datatype

- -

Raw data has a constant datatype which describes the datatype - of the raw data stored in the file, and a memory datatype that - describes the datatype stored in application memory. Both data - types are manipulated with the H5T API. - -

The constant file datatype is associated with the dataset when - the dataset is created in a manner described below. Once - assigned, the constant datatype can never be changed. - -

The memory datatype is specified when data is transferred - to/from application memory. In the name of data sharability, - the memory datatype must be specified, but can be the same - type identifier as the constant datatype. - -

During dataset I/O operations, the library translates the raw data from the constant datatype to the memory datatype or vice versa. Structured datatypes include member offsets to allow reordering of struct members and/or selection of a subset of members, and array datatypes include index permutation information to allow things like transpose operations (the prototype does not support array reordering). Permutations are relative to some extrinsic description of the dataset.

6. Data Space

- -

The dataspace of a dataset defines the number of dimensions and the size of each dimension and is manipulated with the H5S API. The simple dataspace consists of maximum dimension sizes and actual dimension sizes, which are usually the same. However, maximum dimension sizes can be the constant H5S_UNLIMITED, in which case the actual dimension size can be incremented with calls to H5Dextend(). The maximum dimension sizes are constant meta data while the actual dimension sizes are persistent meta data. Initial actual dimension sizes are supplied at the same time as the maximum dimension sizes when the dataset is created.

The dataspace can also be used to define partial I/O - operations. Since I/O operations have two end-points, the raw - data transfer functions take two data space arguments: one which - describes the application memory data space or subset thereof - and another which describes the file data space or subset - thereof. - - -

7. Setting Constant or Persistent Properties

- -

Each dataset has a set of constant and persistent properties - which describe the layout method, pre-compression - transformation, compression method, datatype, external storage, - and data space. The constant properties are set as described - above in a dataset creation property list whose identifier is - passed to H5Dcreate(). - -

-
hid_t H5Dcreate (hid_t file_id, const char - *name, hid_t type_id, hid_t - space_id, hid_t create_plist_id) -
A dataset is created by calling H5Dcreate with - a file identifier, a dataset name, a datatype, a dataspace, - and constant properties. The datatype and dataspace are the - type and space of the dataset as it will exist in the file, - which may be different than in application memory. - Dataset names within a group must be unique: - H5Dcreate returns an error if a dataset with the - name specified in name already exists - at the location specified in file_id. - The create_plist_id is a H5P_DATASET_CREATE - property list created with H5Pcreate() and - initialized with the various functions described above. - H5Dcreate() returns a dataset handle for success - or negative for failure. The handle should eventually be - closed by calling H5Dclose() to release resources - it uses. - -

-
hid_t H5Dopen (hid_t file_id, const char - *name) -
An existing dataset can be opened for access by calling this - function. A dataset handle is returned for success or a - negative value is returned for failure. The handle should - eventually be closed by calling H5Dclose() to - release resources it uses. - -

-
herr_t H5Dclose (hid_t dataset_id) -
This function closes a dataset handle and releases all - resources it might have been using. The handle should not be - used in subsequent calls to the library. - -

-
herr_t H5Dextend (hid_t dataset_id, - hsize_t dim[]) -
This function extends a dataset by increasing the size in - one or more dimensions. Not all datasets can be extended. -
- - - -
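A short sketch combining these calls with the chunked layout from section 2: it creates a one-dimensional dataset whose maximum size is unlimited (the H5S_UNLIMITED constant) and later grows it with H5Dextend(). The identifier file is assumed to be an open file, and the dataset name and the sizes are arbitrary placeholders.

-hid_t space, dcpl, dset;
-hsize_t dims[1]     = {100};
-hsize_t maxdims[1]  = {H5S_UNLIMITED};
-hsize_t chunk[1]    = {25};
-hsize_t new_size[1] = {200};
-
-space = H5Screate_simple (1, dims, maxdims);   /* extendible extent */
-dcpl  = H5Pcreate (H5P_DATASET_CREATE);
-H5Pset_chunk (dcpl, 1, chunk);                 /* extendible data must be chunked */
-dset  = H5Dcreate (file, "extendible", H5T_NATIVE_INT, space, dcpl);
-
-H5Dextend (dset, new_size);                    /* grow from 100 to 200 elements */
-
-H5Dclose (dset);
-H5Pclose (dcpl);
-H5Sclose (space);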

8. Querying Constant or Persistent Properties

- -

Constant or persistent properties can be queried with a set of - three functions. Each function returns an identifier for a copy - of the requested properties. The identifier can be passed to - various functions which modify the underlying object to derive a - new object; the original dataset is completely unchanged. The - return values from these functions should be properly destroyed - when no longer needed. - -

-
hid_t H5Dget_type (hid_t dataset_id) -
Returns an identifier for a copy of the dataset permanent - datatype or negative for failure. - -
hid_t H5Dget_space (hid_t dataset_id) -
Returns an identifier for a copy of the dataset permanent - data space, which also contains information about the current - size of the dataset if the data set is extendable with - H5Dextend(). - -
hid_t H5Dget_create_plist (hid_t - dataset_id) -
Returns an identifier for a copy of the dataset creation - property list. The new property list is created by examining - various permanent properties of the dataset. This is mostly a - catch-all for everything but type and space. -
- - - -
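For instance, an application that only has a dataset handle can recover copies of the file datatype, dataspace, and creation properties as sketched below; dataset is assumed to be an open dataset handle.

-hid_t ftype, fspace, dcpl;
-
-ftype  = H5Dget_type (dataset);          /* copy of the file datatype */
-fspace = H5Dget_space (dataset);         /* copy of the file dataspace */
-dcpl   = H5Dget_create_plist (dataset);  /* copy of the creation properties */
-
-/* ... examine the copies ... */
-
-H5Pclose (dcpl);
-H5Sclose (fspace);
-H5Tclose (ftype);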

9. Setting Memory and Transfer Properties

- -

A dataset also has memory properties which describe memory - within the application, and transfer properties that control - various aspects of the I/O operations. The memory can have a - datatype different than the permanent file datatype (different - number types, different struct member offsets, different array - element orderings) and can also be a different size (memory is a - subset of the permanent dataset elements, or vice versa). The - transfer properties might provide caching hints or collective - I/O information. Therefore, each I/O operation must specify - memory and transfer properties. - -

The memory properties are specified with type_id and - space_id arguments while the transfer properties are - specified with the transfer_id property list for the - H5Dread() and H5Dwrite() functions - (these functions are described below). - -

-
herr_t H5Pset_buffer (hid_t xfer_plist, - hsize_t max_buf_size, void *tconv_buf, void - *bkg_buf) -
hsize_t H5Pget_buffer (hid_t xfer_plist, void - **tconv_buf, void **bkg_buf) -
Sets or retrieves the maximum size in bytes of the temporary - buffer used for datatype conversion in the I/O pipeline. An - application-defined buffer can also be supplied as the - tconv_buf argument, otherwise a buffer will be - allocated and freed on demand by the library. A second - temporary buffer bkg_buf can also be supplied and - should be the same size as the tconv_buf. The - default values are 1MB for the maximum buffer size, and null - pointers for each buffer indicating that they should be - allocated on demand and freed when no longer needed. The - H5Pget_buffer() function returns the maximum - buffer size or zero on error. -
- -

If the maximum size of the temporary I/O pipeline buffers is - too small to hold the entire I/O request, then the I/O request - will be fragmented and the transfer operation will be strip - mined. However, certain restrictions apply to the strip - mining. For instance, when performing I/O on a hyperslab of a - simple data space the strip mining is in terms of the slowest - varying dimension. So if a 100x200x300 hyperslab is requested, - the temporary buffer must be large enough to hold a 1x200x300 - sub-hyperslab. - -

To prevent strip mining from happening, the application should - use H5Pset_buffer() to set the size of the - temporary buffer so it's large enough to hold the entire - request. - -

-

- - - - - -

Example

-

This example shows how to define a function that sets - a dataset transfer property list so that strip mining - does not occur. It takes an (optional) dataset transfer - property list, a dataset, a data space that describes - what data points are being transfered, and a datatype - for the data points in memory. It returns a (new) - dataset transfer property list with the temporary - buffer size set to an appropriate value. The return - value should be passed as the fifth argument to - H5Dread() or H5Dwrite(). -

- 1 hid_t
- 2 disable_strip_mining (hid_t xfer_plist, hid_t dataset,
- 3                       hid_t space, hid_t mem_type)
- 4 {
- 5     hid_t file_type;          /* File datatype */
- 6     size_t type_size;         /* Size of the larger type */
- 7     size_t size;              /* Temp buffer size */
- 8     /* xfer_plist doubles as the return value */
- 9 
-10     file_type = H5Dget_type (dataset);
-11     type_size = MAX(H5Tget_size(file_type), H5Tget_size(mem_type));
-12     H5Tclose (file_type);
-13     size = H5Sget_simple_extent_npoints(space) * type_size;
-14     if (xfer_plist<0) xfer_plist = H5Pcreate (H5P_DATASET_XFER);
-15     H5Pset_buffer(xfer_plist, size, NULL, NULL);
-16     return xfer_plist;
-17 }
-	      
-
-
- - - -

10. Querying Memory or Transfer Properties

- -

Unlike constant and persistent properties, a dataset cannot be queried for its memory or transfer properties. Memory properties cannot be queried because the application already stores those properties separate from the buffer that holds the raw data, and the buffer may hold multiple segments from various datasets and thus have more than one set of memory properties. The transfer properties cannot be queried from the dataset because they're associated with the transfer itself and not with the dataset (but one can call H5Pget_property() to query transfer properties from a template).

11. Raw Data I/O

- -

All raw data I/O is accomplished through these functions which - take a dataset handle, a memory datatype, a memory data space, - a file data space, transfer properties, and an application - memory buffer. They translate data between the memory datatype - and space and the file datatype and space. The data spaces can - be used to describe partial I/O operations. - -

-
herr_t H5Dread (hid_t dataset_id, hid_t - mem_type_id, hid_t mem_space_id, hid_t - file_space_id, hid_t xfer_plist_id, - void *buf/*out*/) -
Reads raw data from the specified dataset into buf - converting from file datatype and space to memory datatype - and space. - -

-
herr_t H5Dwrite (hid_t dataset_id, hid_t - mem_type_id, hid_t mem_space_id, hid_t - file_space_id, hid_t xfer_plist_id, - const void *buf) -
Writes raw data from an application buffer buf to - the specified dataset converting from memory datatype and - space to file datatype and space. -
- - -

In the name of sharability, the memory datatype must be - supplied. However, it can be the same identifier as was used to - create the dataset or as was returned by - H5Dget_type(); the library will not implicitly - derive memory datatypes from constant datatypes. - -

For complete reads of the dataset one may supply - H5S_ALL as the argument for the file data space. - If H5S_ALL is also supplied as the memory data - space then no data space conversion is performed. This is a - somewhat dangerous situation since the file data space might be - different than what the application expects. - - - -

12. Examples

- -

The examples in this section illustrate some common dataset - practices. - - -

This example shows how to create a dataset which is stored in - memory as a two-dimensional array of native double - values but is stored in the file in Cray float - format using LZ77 compression. The dataset is written to the - HDF5 file and then read back as a two-dimensional array of - float values. - -

-

- - - - - -

Example 1

-

- 1 hid_t file, data_space, dataset, properties;
- 2 double dd[500][600];
- 3 float ff[500][600];
- 4 hsize_t dims[2], chunk_size[2];
- 5 
- 6 /* Describe the size of the array */
- 7 dims[0] = 500;
- 8 dims[1] = 600;
- 9 data_space = H5Screate_simple (2, dims, NULL);
-10 
-11 
-12 /*
-13  * Create a new file with read/write access,
-14  * default file creation properties, and default file
-15  * access properties.
-16  */
-17 file = H5Fcreate ("test.h5", H5F_ACC_RDWR, H5P_DEFAULT,
-18                   H5P_DEFAULT);
-19 
-20 /* 
-21  * Set the dataset creation plist to specify that
-22  * the raw data is to be partitioned into 100x100 element
-23  * chunks and that each chunk is to be compressed with
-24  * LZ77.
-25  */
-26 chunk_size[0] = chunk_size[1] = 100;
-27 properties = H5Pcreate (H5P_DATASET_CREATE);
-28 H5Pset_chunk (properties, 2, chunk_size);
-29 H5Pset_deflate (properties, 9);
-30 
-31 /*
-32  * Create a new dataset within the file.  The datatype
-33  * and data space describe the data on disk, which may
-34  * be different than the format used in the application's
-35  * memory.
-36  */
-37 dataset = H5Dcreate (file, "dataset", H5T_CRAY_FLOAT,
-38                      data_space, properties);
-39 
-40 /*
-41  * Write the array to the file.  The datatype and data
-42  * space describe the format of the data in the `dd'
-43  * buffer.  The raw data is translated to the format
-44  * required on disk defined above.  We use default raw
-45  * data transfer properties.
-46  */
-47 H5Dwrite (dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL,
-48           H5P_DEFAULT, dd);
-49 
-50 /*
-51  * Read the array as floats.  This is similar to writing
-52  * data except the data flows in the opposite direction.
-53  */
-54 H5Dread (dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
-55          H5P_DEFAULT, ff);
-56 
-64 H5Dclose (dataset);
-65 H5Sclose (data_space);
-66 H5Pclose (properties);
-67 H5Fclose (file);
-	      
-
-
- -

This example uses the file created in Example 1 and reads a hyperslab of the 500x600 file dataset. The hyperslab size is 100x200 and it is located beginning at element <200,200>. We read the hyperslab into a 200x400 array in memory beginning at element <0,0> in memory. Visually, the transfer looks something like this:

- Raw Data Transfer -
- -

-

- - - - - -

Example 2

-

- 1 hid_t file, mem_space, file_space, dataset;
- 2 double dd[200][400];
- 3 hsize_t offset[2];
- 4 hsize_t size[2];
- 5 
- 6 /*
- 7  * Open an existing file and its dataset.
- 8  */
- 9 file = H5Fopen ("test.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
-10 dataset = H5Dopen (file, "dataset");
-11 
-12 /*
-13  * Describe the file data space.
-14  */
-15 offset[0] = 200; /*offset of hyperslab in file*/
-16 offset[1] = 200;
-17 size[0] = 100;   /*size of hyperslab*/
-18 size[1] = 200;
-19 file_space = H5Dget_space (dataset);
-20 H5Sselect_hyperslab (file_space, H5S_SELECT_SET, offset, NULL, size, NULL);
-21 
-22 /*
-23  * Describe the memory data space.
-24  */
-25 size[0] = 200;  /*size of memory array*/
-26 size[1] = 400;
-27 mem_space = H5Screate_simple (2, size, NULL);
-28 
-29 offset[0] = 0;  /*offset of hyperslab in memory*/
-30 offset[1] = 0;
-31 size[0] = 100;  /*size of hyperslab*/
-32 size[1] = 200;
-33 H5Sselect_hyperslab (mem_space, H5S_SELECT_SET, offset, NULL, size, NULL);
-34 
-35 /*
-36  * Read the dataset.
-37  */
-38 H5Dread (dataset, H5T_NATIVE_DOUBLE, mem_space,
-39          file_space, H5P_DEFAULT, dd);
-40 
-41 /*
-42  * Close/release resources.
-43  */
-44 H5Dclose (dataset);
-45 H5Sclose (mem_space);
-46 H5Sclose (file_space);
-47 H5Fclose (file);
-	      
-
-
- -

If the file contains a compound data structure, one of whose members is a floating point value (call it "delta"), but the application is interested in reading an array of just the "delta" values, then the application can treat its floating point array as an array of structs with a single "delta" member, as shown in the example below.

-

- - - - - -

Example 3

-

- 1 hid_t file, dataset, type;
- 2 double delta[200];
- 3 
- 4 /*
- 5  * Open an existing file and its dataset.
- 6  */
- 7 file = H5Fopen ("test.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
- 8 dataset = H5Dopen (file, "dataset");
- 9 
-10 /*
-11  * Describe the memory datatype, a struct with a single
-12  * "delta" member.
-13  */
-14 type = H5Tcreate (H5T_COMPOUND, sizeof(double));
-15 H5Tinsert (type, "delta", 0, H5T_NATIVE_DOUBLE);
-16 
-17 /*
-18  * Read the dataset.
-19  */
-20 H5Dread (dataset, type, H5S_ALL, H5S_ALL,
-21          H5P_DEFAULT, delta);
-22 
-23 /*
-24  * Close/release resources.
-25  */
-26 H5Dclose (dataset);
-27 H5Tclose (type);
-28 H5Fclose (file);
-	      
-
-
- - -
-
- - - -
diff --git a/doc/html/Dataspaces.html b/doc/html/Dataspaces.html
deleted file mode 100644
index c83d285..0000000
--- a/doc/html/Dataspaces.html
+++ /dev/null
@@ -1,742 +0,0 @@
-
- - - -
-
-

-The Dataspace Interface (H5S)

- -

-1. Introduction

-The dataspace interface (H5S) provides a mechanism to describe the positions of the elements of a dataset and is designed in such a way as to allow new features to be easily added without disrupting applications that use the dataspace interface. A dataset (defined with the dataset interface) is composed of a collection of raw data points of homogeneous type, defined in the datatype (H5T) interface, and organized according to a dataspace defined with this interface.

A dataspace describes the locations at which dataset elements are stored. A dataspace is either a regular N-dimensional array of data points, called a simple dataspace, or a more general collection of data points organized in another manner, called a complex dataspace. A scalar dataspace is a special case of the simple dataspace and is defined to be a single, 0-dimensional data point. Currently only scalar and simple dataspaces are supported with this version of the H5S interface. Complex dataspaces will be defined and implemented in a future version. Complex dataspaces are intended for structures which are awkward to express in simple dataspaces, such as irregularly gridded data or adaptive mesh refinement data. This interface provides functions to set and query properties of a dataspace.

Operations on a dataspace include defining or extending the extent of -the dataspace, selecting portions of the dataspace for I/O and storing the -dataspaces in the file. The extent of a dataspace is the range of coordinates -over which dataset elements are defined and stored. Dataspace selections are -subsets of the extent (up to the entire extent) which are selected for some -operation. - -

For example, a 2-dimensional dataspace with an extent of 10 by 10 may have -the following very simple selection: -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
0 1 2 3 4 5 6 7 8 9
0----------
1-XXX------
2-XXX------
3-XXX------
4-XXX------
5-XXX------
6----------
7----------
8----------
9----------
-
Example 1: Contiguous rectangular selection -
- - -
Or, a more complex selection may be defined: -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
0 1 2 3 4 5 6 7 8 9
0----------
1-XXX--X---
2-X-X------
3-X-X--X---
4-X-X------
5-XXX--X---
6----------
7--XXXX----
8----------
9----------
-
Example 2: Non-contiguous selection -
- -

Selections within dataspaces have an offset within the extent which is used -to locate the selection within the extent of the dataspace. Selection offsets -default to 0 in each dimension, but may be changed to move the selection within -a dataspace. In example 2 above, if the offset was changed to 1,1, the selection -would look like this: -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
0 1 2 3 4 5 6 7 8 9
0----------
1----------
2--XXX--X--
3--X-X-----
4--X-X--X--
5--X-X-----
6--XXX--X--
7----------
8---XXXX---
9----------
-
Example 3: Non-contiguous selection with 1,1 offset -
- -

Selections also have a linearization ordering of the points selected -(defaulting to "C" order, ie. last dimension changing fastest). The -linearization order may be specified for each point or it may be chosen by -the axis of the dataspace. For example, with the default "C" ordering, -example 1's selected points are iterated through in this order: (1,1), (1,2), -(1,3), (2,1), (2,2), etc. With "FORTRAN" ordering, example 1's selected points -would be iterated through in this order: (1,1), (2,1), (3,1), (4,1), (5,1), -(1,2), (2,2), etc. - -

A dataspace may be stored in the file as a permanent object, to allow many -datasets to use a commonly defined dataspace. Dataspaces with extendable -extents (ie. unlimited dimensions) are not able to be stored as permanent -dataspaces. - -

Dataspaces may be created using an existing permanent dataspace as a container to locate the new dataspace within. These dataspaces are complete dataspaces and may be used to define datasets. A dataspace with a "parent" can be queried to determine the parent dataspace and the location within the parent. These dataspaces must currently have the same number of dimensions as the parent dataspace.

2. General Dataspace Operations

-The functions defined in this section operate on dataspaces as a whole. -New dataspaces can be created from scratch or copied from existing data -spaces. When a dataspace is no longer needed its resources should be released -by calling H5Sclose(). -
- -
-hid_t H5Screate(H5S_class_t type)
- -
- This function creates a new dataspace of a particular type. The -types currently supported are H5S_SCALAR and H5S_SIMPLE; -others are planned to be added later. -
- - - -
-hid_t H5Scopy (hid_t space)
- -
- This function creates a new dataspace which is an exact copy of the -dataspace space. -
- - - - - -
-herr_t H5Sclose (hid_t space)
- -
-Releases resources associated with a dataspace. Subsequent use of the -dataspace identifier after this call is undefined. -
- - - -
- - -
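A minimal sketch of the create/copy/close life cycle using only the calls above:

-hid_t space, space_copy;
-
-space = H5Screate (H5S_SCALAR);     /* a single data point */
-space_copy = H5Scopy (space);       /* exact duplicate of the dataspace */
-
-H5Sclose (space_copy);
-H5Sclose (space);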

3. Dataspace Extent Operations

-These functions operate on the extent portion of a dataspace. - -
-
-herr_t H5Sset_extent_simple (hid_t space, int rank, const hsize_t -*current_size, const hsize_t *maximum_size)
- -
-Sets or resets the size of an existing dataspace, where rank is -the dimensionality, or number of dimensions, of the dataspace. -current_size is an array of size rank which contains the new size -of each dimension in the dataspace. maximum_size is an array of size -rank which contains the maximum size of each dimension in the dataspace. -Any previous extent is removed from the dataspace, the dataspace type is set to -H5S_SIMPLE and the extent is set as specified. -
- -
-herr_t H5Sset_extent_none (hid_t space)
- -
-Removes the extent from a dataspace and sets the type to H5S_NO_CLASS. -
- -
-herr_t H5Sextent_copy (hid_t dest_space, - hid_t source_space)
- -
-Copies the extent from source_space to dest_space, which may -change the type of the dataspace. Returns non-negative on success, negative on -failure. -
- -
-hsize_t H5Sget_simple_extent_npoints (hid_t space)
- -
-This function determines the number of elements in a dataspace. For example, a -simple 3-dimensional dataspace with dimensions 2, 3 and 4 would have 24 -elements. -Returns the number of elements in the dataspace, negative on failure. -
- -
-int H5Sget_simple_extent_ndims (hid_t space)
- -
-This function determines the dimensionality (or rank) of a dataspace. -Returns the number of dimensions in the dataspace, negative on failure. -
- -
-herr_t H5Sget_simple_extent_dims (hid_t space, hsize_t *dims, - hsize_t *max)
- -
-The function retrieves the size of the extent of the dataspace space by -placing the size of each dimension in the array dims. Also retrieves -the size of the maximum extent of the dataspace, placing the results in -max. -Returns non-negative on success, negative on failure. -
- -
- -
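The following sketch builds the 2x3x4 dataspace mentioned in the H5Sget_simple_extent_npoints description and queries it back; the maximum size is simply set equal to the current size.

-hid_t space;
-hsize_t cur[3] = {2, 3, 4};
-hsize_t dims[3], maxdims[3];
-int ndims;
-hsize_t npoints;
-
-space = H5Screate (H5S_SIMPLE);
-H5Sset_extent_simple (space, 3, cur, cur);       /* maximum size == current size */
-
-ndims   = H5Sget_simple_extent_ndims (space);    /* 3 */
-npoints = H5Sget_simple_extent_npoints (space);  /* 24 */
-H5Sget_simple_extent_dims (space, dims, maxdims);
-
-H5Sclose (space);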

4. Dataspace Selection Operations

-Selections are maintained separately from extents in dataspaces and operations -on the selection of a dataspace do not affect the extent of the dataspace. -Selections are independent of extent type and the boundaries of selections are -reconciled with the extent at the time of the data transfer. Selection offsets -apply a selection to a location within an extent, allowing the same selection -to be moved within the extent without requiring a new selection to be specified. -Offsets default to 0 when the dataspace is created. Offsets are applied when -an I/O transfer is performed (and checked during calls to H5Sselect_valid). -Selections have an iteration order for the points selected, which can be any -permutation of the dimensions involved (defaulting to 'C' array order) or a -specific order for the selected points, for selections composed of single array -elements with H5Sselect_elements. - - - -Further methods of selecting -portions of a dataspace may be added in the future. - -
-
-herr_t H5Sselect_hyperslab (hid_t space, h5s_seloper_t op, - const hsize_t * start, const hsize_t * stride, - const hsize_t * count, const hsize_t * block)
- -
-This function selects a hyperslab region to add to the current selected region for the space dataspace. The start, stride, count and block arrays must be the same size as the rank of the dataspace. The selection operator op determines how the new selection is to be combined with the already existing selection for the dataspace. Currently, the following operators are supported:
- H5S_SELECT_SET - - Replaces the existing selection with the parameters from this call. - Overlapping blocks are not supported with this operator. -
- H5S_SELECT_OR - - Adds the new selection to the existing selection. -
-
-

-The start array determines the starting coordinates of the hyperslab to select. The stride array chooses array locations from the dataspace, with each value in the stride array determining how many elements to move in that dimension. Setting a value in the stride array to 1 selects every element in that dimension of the dataspace; setting a value of 2 moves to every other element in that dimension. In other words, the stride determines the number of elements to move from the start location in each dimension. Stride values of 0 are not allowed. If the stride parameter is NULL, a contiguous hyperslab is selected (as if every value in the stride array were 1). The count array determines how many blocks to select from the dataspace in each dimension. The block array determines the size of the element block selected from the dataspace. If the block parameter is set to NULL, the block size defaults to a single element in each dimension (as if every value in the block array were 1).

For example, in a 2-dimensional dataspace, setting start to [1,1], -stride to [4,4], count to [3,7] and block to [2,2] selects -21 2x2 blocks of array elements starting with location (1,1) and selecting -blocks at locations (1,1), (5,1), (9,1), (1,5), (5,5), etc. -

Regions selected with this function call default to 'C' order iteration when -I/O is performed. -

- -
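The 21-block example above translates directly into code. This sketch assumes a 20x30 extent, which is large enough to hold the selection; any sufficiently large extent would do.

-hid_t space;
-hsize_t dims[2]   = {20, 30};
-hsize_t start[2]  = {1, 1};
-hsize_t stride[2] = {4, 4};
-hsize_t count[2]  = {3, 7};
-hsize_t block[2]  = {2, 2};
-
-space = H5Screate_simple (2, dims, NULL);
-
-/* 21 blocks of 2x2 elements: (1,1), (5,1), (9,1), (1,5), ... */
-H5Sselect_hyperslab (space, H5S_SELECT_SET, start, stride, count, block);
-
-H5Sclose (space);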
-herr_t H5Sselect_elements (hid_t space, h5s_seloper_t op, - const size_t num_elements, const hsize_t *coord[])
- -
-This function selects array elements to be included in the selection for the space dataspace. The number of elements selected is given by num_elements. The coord array is a two-dimensional array of size <dataspace rank> by <num_elements> (i.e., a list of coordinates in the array). The order of the element coordinates in the coord array also specifies the order in which the array elements are iterated through when I/O is performed. Duplicate coordinate locations are not checked for.

The selection operator op determines how the new selection is to be -combined with the already existing selection for the dataspace. -The following operators are supported: -

- - -
- H5S_SELECT_SET - - Replaces the existing selection with the parameters from this call. - Overlapping blocks are not supported with this operator. -
- H5S_SELECT_OR - - Adds the new selection to the existing selection. -
-
-When operators other than -H5S_SELECT_SET are used to combine a new selection with an existing selection, -the selection ordering is reset to 'C' array ordering. -
- -
-herr_t H5Sselect_all (hid_t space)
- -
-This function selects the special H5S_SELECT_ALL region for the space dataspace. H5S_SELECT_ALL selects the entire dataspace for any dataspace it is applied to.
- -
-herr_t H5Sselect_none (hid_t space)
- -
-This function resets the selection region for the space -dataspace not to include any elements. -
- -
- - - - -
-htri_t H5Sselect_valid (hid_t space)
- -
-This function verifies that the selection for a dataspace is within the extent -of the dataspace, if the currently set offset for the dataspace is used. -Returns TRUE if the selection is contained within the extent, FALSE if it -is not contained within the extent and FAIL on error conditions (such as if -the selection or extent is not defined). -
- -
-hsize_t H5Sget_select_npoints (hid_t space)
- -
-This function determines the number of elements in the current selection -of a dataspace. -
- -
-herr_t H5Soffset_simple (hid_t space, const hssize_t * - offset)
- -
-Sets the offset of a simple dataspace space. The offset array -must be the same number of elements as the number of dimensions for the -dataspace. If the offset array is set to NULL, the offset -for the dataspace is reset to 0. -
- -
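As a sketch of how offsets interact with selections, the fragment below selects a 3x3 block, shifts it by (1,1) with H5Soffset_simple(), and confirms with H5Sselect_valid() that the shifted selection still lies within the 10x10 extent; the sizes are arbitrary.

-hid_t space;
-hsize_t dims[2]    = {10, 10};
-hsize_t start[2]   = {1, 1};
-hsize_t count[2]   = {3, 3};
-hssize_t offset[2] = {1, 1};
-
-space = H5Screate_simple (2, dims, NULL);
-H5Sselect_hyperslab (space, H5S_SELECT_SET, start, NULL, count, NULL);
-
-H5Soffset_simple (space, offset);     /* move the selection to start at (2,2) */
-if (H5Sselect_valid (space) > 0) {
-    /* selection is still contained in the extent */
-}
-
-H5Sclose (space);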
- - -

5. Convenience Dataspace Operation

- -
- - - -
-hid_t H5Screate_simple(int rank, const hsize_t *current_size, - const hsize_t *maximum_size)
- -
- This function is a "convenience" wrapper to create a simple dataspace and set its extent in one call. It is equivalent to calling H5Screate and H5Sset_extent_simple() in two steps.
- - - - - - - -
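A sketch of the equivalence: the two dataspaces below have identical extents.

-hid_t s1, s2;
-hsize_t cur[2] = {10, 10};
-
-/* one call ... */
-s1 = H5Screate_simple (2, cur, cur);
-
-/* ... is equivalent to two */
-s2 = H5Screate (H5S_SIMPLE);
-H5Sset_extent_simple (s2, 2, cur, cur);
-
-H5Sclose (s2);
-H5Sclose (s1);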
- - -
-
- - - -
diff --git a/doc/html/Datatypes.html b/doc/html/Datatypes.html
deleted file mode 100644
index 232d7fb..0000000
--- a/doc/html/Datatypes.html
+++ /dev/null
@@ -1,3114 +0,0 @@
-
- - - -
-
-
-

The Datatype Interface (H5T)

- -

1. Introduction

- -

The datatype interface provides a mechanism to describe the storage format of individual data points of a data set and is designed in such a way as to allow new features to be easily added without disrupting applications that use the datatype interface. A dataset (the H5D interface) is composed of a collection of raw data points of homogeneous type organized according to the data space (the H5S interface).

A datatype is a collection of datatype properties, all of - which can be stored on disk, and which when taken as a whole, - provide complete information for data conversion to or from that - datatype. The interface provides functions to set and query - properties of a datatype. - -

A data point is an instance of a datatype, - which is an instance of a type class. We have defined - a set of type classes and properties which can be extended at a - later time. The atomic type classes are those which describe - types which cannot be decomposed at the datatype interface - level; all other classes are compound. - -

2. General Datatype Operations

- -

The functions defined in this section operate on datatypes as - a whole. New datatypes can be created from scratch or copied - from existing datatypes. When a datatype is no longer needed - its resources should be released by calling H5Tclose(). - -

Datatypes come in two flavors: named datatypes and transient - datatypes. A named datatype is stored in a file while the - transient flavor is independent of any file. Named datatypes - are always read-only, but transient types come in three - varieties: modifiable, read-only, and immutable. The difference - between read-only and immutable types is that immutable types - cannot be closed except when the entire library is closed (the - predefined types like H5T_NATIVE_INT are immutable - transient types). - -
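A sketch of the difference, using the functions described below: a transient copy of a predefined type is committed to a file as a named (read-only) datatype and later reopened by name. The identifier file and the name "my_int" are arbitrary placeholders.

-hid_t type, named;
-
-type = H5Tcopy (H5T_NATIVE_INT);      /* modifiable transient datatype */
-H5Tcommit (file, "my_int", type);     /* now a read-only named datatype */
-
-if (H5Tcommitted (type) > 0) {
-    /* datasets created with this type share the named datatype */
-}
-
-named = H5Topen (file, "my_int");     /* reopen by name; read-only */
-H5Tclose (named);
-H5Tclose (type);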

-
hid_t H5Tcreate (H5T_class_t class, size_t - size) -
Datatypes can be created by calling this - function, where class is a datatype class - identifier. However, the only class currently allowed is - H5T_COMPOUND to create a new empty compound - datatype where size is the total size in bytes of an - instance of this datatype. Other datatypes are created with - H5Tcopy(). All functions that return datatype - identifiers return a negative value for failure. - -

-
hid_t H5Topen (hid_t location, const char - *name) -
A named datatype can be opened by calling this function, - which returns a datatype identifier. The identifier should - eventually be released by calling H5Tclose() to - release resources. The named datatype returned by this - function is read-only or a negative value is returned for - failure. The location is either a file or group - identifier. - -

-
herr_t H5Tcommit (hid_t location, const char - *name, hid_t type) -
A transient datatype (not immutable) can be committed to a - file and turned into a named datatype by calling this - function. The location is either a file or group - identifier and when combined with name refers to a new - named datatype. - -

-
htri_t H5Tcommitted (hid_t type) -
A type can be queried to determine if it is a named type or - a transient type. If this function returns a positive value - then the type is named (that is, it has been committed perhaps - by some other application). Datasets which return committed - datatypes with H5Dget_type() are able to share - the datatype with other datasets in the same file. - -

-
hid_t H5Tcopy (hid_t type) -
This function returns a modifiable transient datatype - which is a copy of type or a negative value for - failure. If type is a dataset identifier then the type - returned is a modifiable transient copy of the datatype of - the specified dataset. - -

-
herr_t H5Tclose (hid_t type) -
Releases resources associated with a datatype. The - datatype identifier should not be subsequently used since the - results would be unpredictable. It is illegal to close an - immutable transient datatype. - -

-
htri_t H5Tequal (hid_t type1, hid_t - type2) -
Determines if two types are equal. If type1 and - type2 are the same then this function returns - TRUE, otherwise it returns FALSE (an - error results in a negative return value). - -

-
herr_t H5Tlock (hid_t type) -
A transient datatype can be locked, making it immutable - (read-only and not closable). The library does this to all - predefined types to prevent the application from inadvertently - modifying or deleting (closing) them, but the application is - also allowed to do this for its own datatypes. Immutable - datatypes are closed when the library closes (either by - H5close() or by normal program termination). -
- -

3. Properties of Atomic Types

- -

An atomic type is a type which cannot be decomposed into - smaller units at the API level. All atomic types have a common - set of properties which are augmented by properties specific to - a particular type class. Some of these properties also apply to - compound datatypes, but we discuss them only as they apply to - atomic datatypes here. The properties and the functions that - query and set their values are: - -

-
H5T_class_t H5Tget_class (hid_t type) -
This property holds one of the class names: - H5T_INTEGER, H5T_FLOAT, H5T_TIME, H5T_STRING, or - H5T_BITFIELD. This property is read-only and is set - when the datatype is created or copied (see - H5Tcreate(), H5Tcopy()). If this - function fails it returns H5T_NO_CLASS which has - a negative value (all other class constants are non-negative). - -

-
size_t H5Tget_size (hid_t type) -
herr_t H5Tset_size (hid_t type, size_t - size) -
This property is total size of the datum in bytes, including - padding which may appear on either side of the actual value. - If this property is reset to a smaller value which would cause - the significant part of the data to extend beyond the edge of - the datatype then the offset property is - decremented a bit at a time. If the offset reaches zero and - the significant part of the data still extends beyond the edge - of the datatype then the precision property is - decremented a bit at a time. Decreasing the size of a - datatype may fail if the H5T_FLOAT bit fields would - extend beyond the significant part of the type. Adjusting the - size of an H5T_STRING automatically adjusts the - precision as well. On error, H5Tget_size() - returns zero which is never a valid size. - -

-
H5T_order_t H5Tget_order (hid_t type) -
herr_t H5Tset_order (hid_t type, H5T_order_t - order) -
All atomic datatypes have a byte order which describes how the bytes of the datatype are laid out in memory. If the lowest memory address contains the least significant byte of the datum then it is said to be little-endian or H5T_ORDER_LE. If the bytes are in the opposite order then they are said to be big-endian or H5T_ORDER_BE. Some datatypes have the same byte order on all machines and are H5T_ORDER_NONE (like character strings). If H5Tget_order() fails then it returns H5T_ORDER_ERROR which is a negative value (all successful return values are non-negative).

-
size_t H5Tget_precision (hid_t type) -
herr_t H5Tset_precision (hid_t type, size_t - precision) -
Some datatypes occupy more bytes than what is needed to - store the value. For instance, a short on a Cray - is 32 significant bits in an eight-byte field. The - precision property identifies the number of - significant bits of a datatype and the offset - property (defined below) identifies its location. The - size property defined above represents the entire - size (in bytes) of the datatype. If the precision is - decreased then padding bits are inserted on the MSB side of - the significant bits (this will fail for - H5T_FLOAT types if it results in the sign, - mantissa, or exponent bit field extending beyond the edge of - the significant bit field). On the other hand, if the - precision is increased so that it "hangs over" the edge of the - total size then the offset property is - decremented a bit at a time. If the offset - reaches zero and the significant bits still hang over the - edge, then the total size is increased a byte at a time. The - precision of an H5T_STRING is read-only and is - always eight times the value returned by - H5Tget_size(). H5Tget_precision() - returns zero on failure since zero is never a valid precision. - -

-
size_t H5Tget_offset (hid_t type) -
herr_t H5Tset_offset (hid_t type, size_t - offset) -
While the precision property defines the number of significant bits, the offset property defines the location of those bits within the entire datum. The bits of the entire data are numbered beginning at zero at the least significant bit of the least significant byte (the byte at the lowest memory address for a little-endian type or the byte at the highest address for a big-endian type). The offset property defines the bit location of the least significant bit of a bit field whose length is precision. If the offset is increased so the significant bits "hang over" the edge of the datum, then the size property is automatically incremented. The offset is a read-only property of an H5T_STRING and is always zero. H5Tget_offset() returns zero on failure which is also a valid offset, but is guaranteed to succeed if a call to H5Tget_precision() succeeds with the same arguments.

-
herr_t H5Tget_pad (hid_t type, H5T_pad_t - *lsb, H5T_pad_t *msb) -
herr_t H5Tset_pad (hid_t type, H5T_pad_t - lsb, H5T_pad_t msb) -
The bits of a datum which are not significant as defined by - the precision and offset properties - are called padding. Padding falls into two - categories: padding in the low-numbered bits is lsb - padding and padding in the high-numbered bits is msb - padding (bits are numbered according to the description for - the offset property). Padding bits can always be - set to zero (H5T_PAD_ZERO) or always set to one - (H5T_PAD_ONE). The current pad types are returned - through arguments of H5Tget_pad() either of which - may be null pointers. -
- -

3.1. Properties of Integer Atomic Types

- -

Integer atomic types (class=H5T_INTEGER) - describe integer number formats. Such types include the - following information which describes the type completely and - allows conversion between various integer atomic types. - -

-
H5T_sign_t H5Tget_sign (hid_t type) -
herr_t H5Tset_sign (hid_t type, H5T_sign_t - sign) -
Integer data can be signed two's complement - (H5T_SGN_2) or unsigned - (H5T_SGN_NONE). Whether data is signed or not - becomes important when converting between two integer - datatypes of differing sizes as it determines how values are - truncated and sign extended. -
- -

3.2. Properties of Floating-point Atomic Types

- -

The library supports floating-point atomic types - (class=H5T_FLOAT) as long as the bits of the - exponent are contiguous and stored as a biased positive number, - the bits of the mantissa are contiguous and stored as a positive - magnitude, and a sign bit exists which is set for negative - values. Properties specific to floating-point types are: - -

-
herr_t H5Tget_fields (hid_t type, size_t - *spos, size_t *epos, size_t - *esize, size_t *mpos, size_t - *msize) -
herr_t H5Tset_fields (hid_t type, size_t - spos, size_t epos, size_t esize, - size_t mpos, size_t msize) -
A floating-point datum has bit fields which are the exponent - and mantissa as well as a mantissa sign bit. These properties - define the location (bit position of least significant bit of - the field) and size (in bits) of each field. The bit - positions are numbered beginning at zero at the beginning of - the significant part of the datum (see the descriptions of the - precision and offset - properties). The sign bit is always of length one and none of - the fields are allowed to overlap. When expanding a - floating-point type one should set the precision first; when - decreasing the size one should set the field positions and - sizes first. - -

-
size_t H5Tget_ebias (hid_t type) -
herr_t H5Tset_ebias (hid_t type, size_t - ebias) -
The exponent is stored as a non-negative value which is - ebias larger than the true exponent. - H5Tget_ebias() returns zero on failure which is - also a valid exponent bias, but the function is guaranteed to - succeed if H5Tget_precision() succeeds when - called with the same arguments. - -

-
H5T_norm_t H5Tget_norm (hid_t type) -
herr_t H5Tset_norm (hid_t type, H5T_norm_t - norm) -
This property determines the normalization method of the - mantissa. -
    -
  • If the value is H5T_NORM_MSBSET then the - mantissa is shifted left (if non-zero) until the first bit - after the radix point is set and the exponent is adjusted - accordingly. All bits of the mantissa after the radix - point are stored. - -
  • If its value is H5T_NORM_IMPLIED then the - mantissa is shifted left (if non-zero) until the first bit - after the radix point is set and the exponent is adjusted - accordingly. The first bit after the radix point is not stored - since it's always set. - -
  • If its value is H5T_NORM_NONE then the fractional - part of the mantissa is stored without normalizing it. -
- -

-
H5T_pad_t H5Tget_inpad (hid_t type) -
herr_t H5Tset_inpad (hid_t type, H5T_pad_t - inpad) -
If any internal bits (that is, bits between the sign bit, - the mantissa field, and the exponent field but within the - precision field) are unused, then they will be filled - according to the value of this property. The inpad - argument can be H5T_PAD_ZERO if the internal - padding should always be set to zero, or H5T_PAD_ONE - if it should always be set to one. - H5Tget_inpad() returns H5T_PAD_ERROR - on failure which is a negative value (successful return is - always non-negative). -
- -

3.3. Properties of Date and Time Atomic Types

- -

Dates and times (class=H5T_TIME) are stored as - character strings in one of the ISO-8601 formats like - "1997-12-05 16:25:30"; as character strings using the - Unix asctime(3) format like "Thu Dec 05 16:25:30 1997"; - as an integer value by juxtaposition of the year, month, and - day-of-month, hour, minute and second in decimal like - 19971205162530; as an integer value in Unix time(2) - format; or other variations. - -

3.4. Properties of Character String Atomic Types

- -

Fixed-length character string types are used to store textual - information. The offset property of a string is - always zero and the precision property is eight - times as large as the value returned by - H5Tget_size() (since precision is measured in bits - while size is measured in bytes). Both properties are - read-only. - -

-
H5T_cset_t H5Tget_cset (hid_t type) -
herr_t H5Tset_cset (hid_t type, H5T_cset_t - cset) -
HDF5 is able to distinguish between character sets of - different nationalities and to convert between them to the - extent possible. The only character set currently supported - is H5T_CSET_ASCII. - -

-
H5T_str_t H5Tget_strpad (hid_t type) -
herr_t H5Tset_strpad (hid_t type, H5T_str_t - strpad) -
The method used to store character strings differs with the - programming language: C usually null terminates strings while - Fortran left-justifies and space-pads strings. This property - defines the storage mechanism and can be - -

-

-
H5T_STR_NULLTERM -
A C-style string which is guaranteed to be null - terminated. When converting from a longer string the - value will be truncated and then a null character - appended. - -

-
H5T_STR_NULLPAD -
A C-style string which is padded with null characters - but not necessarily null terminated. Conversion from a - long string to a shorter H5T_STR_NULLPAD - string will truncate but not null terminate. Conversion - from a short value to a longer value will append null - characters as with H5T_STR_NULLTERM. - -

-
H5T_STR_SPACEPAD -
A Fortran-style string which is padded with space - characters. This is the same as - H5T_STR_NULLPAD except the padding character - is a space instead of a null. -
- -

H5Tget_strpad() returns - H5T_STR_ERROR on failure, a negative value (all - successful return values are non-negative). -

- -

3.5. Properties of Bit Field Atomic Types

- -

Converting a bit field (class=H5T_BITFIELD) from - one type to another simply copies the significant bits. If the - destination is smaller than the source then bits are truncated. - Otherwise new bits are filled according to the msb - padding type. - -

3.6. Character and String Datatype Issues

- - The H5T_NATIVE_CHAR and H5T_NATIVE_UCHAR - datatypes are actually numeric data (1-byte integers). If the - application wishes to store character data, then an HDF5 - string datatype should be derived from - H5T_C_S1 instead. - -

Motivation

- - HDF5 defines at least three classes of datatypes: - integer data, floating point data, and character data. - However, the C language defines only integer and - floating point datatypes; character data in C is - overloaded on the 8- or 16-bit integer types and - character strings are overloaded on arrays of those - integer types which, by convention, are terminated with - a zero element. - - In C, the variable unsigned char s[256] is - either an array of numeric data, a single character string - with at most 255 characters, or an array of 256 characters, - depending entirely on usage. For uniformity with the - other H5T_NATIVE_ types, HDF5 uses the - numeric interpretation of H5T_NATIVE_CHAR - and H5T_NATIVE_UCHAR. - - -

Usage

- - To store unsigned char s[256] data as an - array of integer values, use the HDF5 datatype - H5T_NATIVE_UCHAR and a data space that - describes the 256-element array. Some other application - that reads the data will then be able to read, say, a - 256-element array of 2-byte integers and HDF5 will - perform the numeric translation. - - To store unsigned char s[256] data as a - character string, derive a fixed length string datatype - from H5T_C_S1 by increasing its size to - 256 characters. Some other application that reads the - data will be able to read, say, a space padded string - of 16-bit characters and HDF5 will perform the character - and padding translations. - -
-                hid_t s256 = H5Tcopy(H5T_C_S1);
-                             H5Tset_size(s256, 256);
-          
- - To store unsigned char s[256] data as - an array of 256 ASCII characters, use an - HDF5 data space to describe the array and derive a - one-character string type from H5T_C_S1. - Some other application will be able to read a subset - of the array as 16-bit characters and HDF5 will - perform the character translations. - The H5T_STR_NULLPAD is necessary because - if H5T_STR_NULLTERM were used - (the default) then the single character of storage - would be for the null terminator and no useful data - would actually be stored (unless the length were - incremented to more than one character). - -
-                hid_t s1 = H5Tcopy(H5T_C_S1);
-                           H5Tset_strpad(s1, H5T_STR_NULLPAD);
-          
- -

Summary

- - The C language uses the term char to - represent one-byte numeric data and does not make - character strings a first-class datatype. - HDF5 makes a distinction between integer and - character data and maps the C signed char - (H5T_NATIVE_CHAR) and - unsigned char (H5T_NATIVE_UCHAR) - datatypes to the HDF5 integer type class. - -

4. Properties of Opaque Types

- -

Opaque types (class=H5T_OPAQUE) provide the - application with a mechanism for describing data which cannot be - otherwise described by HDF5. The only properties associated with - opaque types are a size in bytes and an ASCII tag which is - manipulated with H5Tset_tag() and - H5Tget_tag() functions. The library contains no - predefined conversion functions but the application is free to - register conversions between any two opaque types or between an - opaque type and some other type. - -
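A hedged sketch of how an opaque type might be created and tagged (the 16-byte size and the tag string are hypothetical application choices):

hid_t blob = H5Tcreate(H5T_OPAQUE, 16);     /* a 16-byte opaque datum               */
H5Tset_tag(blob, "my-app: sensor frame");   /* application-defined ASCII tag        */
char *tag = H5Tget_tag(blob);               /* returns a malloc'd copy of the tag   */
free(tag);                                  /* the caller frees the returned string */
H5Tclose(blob);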

5. Properties of Compound Types

- -

A compound datatype is similar to a struct in C - or a common block in Fortran: it is a collection of one or more - atomic types or small arrays of such types. Each - member of a compound type has a name which is unique - within that type, and a byte offset that determines the first - byte (smallest byte address) of that member in a compound datum. - A compound datatype has the following properties: - -

-
H5T_class_t H5Tget_class (hid_t type) -
All compound datatypes belong to the type class - H5T_COMPOUND. This property is read-only and is - defined when a datatype is created or copied (see - H5Tcreate() or H5Tcopy()). - -

-
size_t H5Tget_size (hid_t type) -
Compound datatypes have a total size in bytes which is - returned by this function. All members of a compound - datatype must exist within this size. A value of zero is returned - for failure; all successful return values are positive. - -

-
int H5Tget_nmembers (hid_t type) -
A compound datatype consists of zero or more members - (defined in any order) with unique names and which occupy - non-overlapping regions within the datum. In the functions - that follow, individual members are referenced by an index - number between zero and N-1, inclusive, where - N is the value returned by this function. - H5Tget_nmembers() returns -1 on failure. - -

-
char *H5Tget_member_name (hid_t type, unsigned - membno) -
Each member has a name which is unique among its siblings in - a compound datatype. This function returns a pointer to a - null-terminated copy of the name allocated with - malloc() or the null pointer on failure. The - caller is responsible for freeing the memory returned by this - function. - -

-
size_t H5Tget_member_offset (hid_t type, unsigned - membno) -
The byte offset of member number membno with - respect to the beginning of the containing compound datum is - returned by this function. A zero is returned on failure - which is also a valid offset, but this function is guaranteed - to succeed if a call to H5Tget_member_class() - succeeds when called with the same type and - membno arguments. - -

-
hid_t H5Tget_member_type (hid_t type, unsigned - membno) -
Each member has its own datatype, a copy of which is - returned by this function. The returned datatype identifier - should be released by eventually calling - H5Tclose() on that type. -
- -

Properties of members of a compound datatype are defined when the member is added to the compound type (see H5Tinsert()) and cannot be subsequently modified. This makes it impossible to define recursive data structures.
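Taken together, these query functions allow a compound datatype to be examined member by member. A minimal sketch, assuming type is an open compound datatype identifier:

int nmembs = H5Tget_nmembers(type);
unsigned u;
for (u = 0; u < (unsigned)nmembs; u++) {
    char   *name   = H5Tget_member_name(type, u);   /* free() when done     */
    size_t  offset = H5Tget_member_offset(type, u);
    hid_t   mtype  = H5Tget_member_type(type, u);   /* H5Tclose() when done */
    printf("member %u: %s at offset %lu, %lu bytes\n",
           u, name, (unsigned long)offset, (unsigned long)H5Tget_size(mtype));
    H5Tclose(mtype);
    free(name);
}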

6. Predefined Atomic Datatypes

- - -

The library predefines a modest number of datatypes having names like H5T_arch_base where arch is an architecture name and base is a programming type name. New types can be derived from the predefined types by copying a predefined type (see H5Tcopy()) and then modifying the result.

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Architecture NameDescription
IEEEThis architecture defines standard floating point - types in various byte orders.
STDThis is an architecture that contains semi-standard - datatypes like signed two's complement integers, - unsigned integers, and bitfields in various byte - orders.
UNIXTypes which are specific to Unix operating systems are defined in this architecture. The only type currently defined is the Unix date and time type (time_t).
C
FORTRAN
Types which are specific to the C or Fortran - programming languages are defined in these - architectures. For instance, H5T_C_STRING - defines a base string type with null termination which - can be used to derive string types of other - lengths.
NATIVEThis architecture contains C-like datatypes for the - machine on which the library was compiled. The types - were actually defined by running the - H5detect program when the library was - compiled. In order to be portable, applications should - almost always use this architecture to describe things - in memory.
CRAYCray architectures. These are word-addressable, - big-endian systems with non-IEEE floating point.
INTELAll Intel and compatible CPU's including 80286, 80386, - 80486, Pentium, Pentium-Pro, and Pentium-II. These are - little-endian systems with IEEE floating-point.
MIPSAll MIPS CPU's commonly used in SGI systems. These - are big-endian systems with IEEE floating-point.
ALPHAAll DEC Alpha CPU's, little-endian systems with IEEE - floating-point.
-
- -

The base name of most types consists of a letter, a precision - in bits, and an indication of the byte order. The letters are: - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
BBitfield
DDate and time
FFloating point
ISigned integer
RReferences
SCharacter string
UUnsigned integer
-
- -

The byte order is a two-letter sequence: - -

-

- - - - - - - - - - - - - -
BEBig endian
LELittle endian
VXVax order
-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


Example


Description
H5T_IEEE_F64LEEight-byte, little-endian, IEEE floating-point
H5T_IEEE_F32BEFour-byte, big-endian, IEEE floating point
H5T_STD_I32LEFour-byte, little-endian, signed two's complement integer
H5T_STD_U16BETwo-byte, big-endian, unsigned integer
H5T_UNIX_D32LEFour-byte, little-endian, time_t
H5T_C_S1One-byte, null-terminated string of eight-bit characters
H5T_INTEL_B64Eight-byte bit field on an Intel CPU
H5T_CRAY_F64Eight-byte Cray floating point
H5T_STD_ROBJReference to an entire object in a file
-
- -

The NATIVE architecture has base names which don't - follow the same rules as the others. Instead, native type names - are similar to the C type names. Here are some examples: - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


Example


Corresponding C Type
H5T_NATIVE_CHARchar
H5T_NATIVE_SCHARsigned char
H5T_NATIVE_UCHARunsigned char
H5T_NATIVE_SHORTshort
H5T_NATIVE_USHORTunsigned short
H5T_NATIVE_INTint
H5T_NATIVE_UINTunsigned
H5T_NATIVE_LONGlong
H5T_NATIVE_ULONGunsigned long
H5T_NATIVE_LLONGlong long
H5T_NATIVE_ULLONGunsigned long long
H5T_NATIVE_FLOATfloat
H5T_NATIVE_DOUBLEdouble
H5T_NATIVE_LDOUBLElong double
H5T_NATIVE_HSIZEhsize_t
H5T_NATIVE_HSSIZEhssize_t
H5T_NATIVE_HERRherr_t
H5T_NATIVE_HBOOLhbool_t
-
- -

-

- - - - - -

Example: A 128-bit - integer

-

To create a 128-bit, little-endian signed integer - type one could use the following (increasing the - precision of a type automatically increases the total - size): - -

-hid_t new_type = H5Tcopy (H5T_NATIVE_INT);
-H5Tset_precision (new_type, 128);
-H5Tset_order (new_type, H5T_ORDER_LE);
-	      
-
-
- -

-

- - - - - -

Example: An 80-character - string

-

To create an 80-byte null terminated string type one - might do this (the offset of a character string is - always zero and the precision is adjusted - automatically to match the size): - -

-hid_t str80 = H5Tcopy (H5T_C_S1);
-H5Tset_size (str80, 80);
-	      
-
-
- -

A complete list of the datatypes predefined in HDF5 can be found in - HDF5 Predefined Datatypes - in the HDF5 Reference Manual. - - -

7. Defining Compound Datatypes

- -

Unlike atomic datatypes, which are derived from other atomic datatypes, compound datatypes are created from scratch. First, one creates an empty compound datatype and specifies its total size. Then members are added to the compound datatype in any order.

Usually a C struct will be defined to hold a data point in - memory, and the offsets of the members in memory will be the - offsets of the struct members from the beginning of an instance - of the struct. - -

-
HOFFSET(s,m) -
This macro computes the offset of member m within - a struct s. -
offsetof(s,m) -
This macro defined in stddef.h does - exactly the same thing as the HOFFSET() macro. -
- -

Each member must have a descriptive name which is the - key used to uniquely identify the member within the compound - datatype. A member name in an HDF5 datatype does not - necessarily have to be the same as the name of the member in the - C struct, although this is often the case. Nor does one need to - define all members of the C struct in the HDF5 compound - datatype (or vice versa). - -

-

- - - - - -

Example: A simple struct

-

An HDF5 datatype is created to describe complex - numbers whose type is defined by the - complex_t struct. - -

-typedef struct {
-   double re;   /*real part*/
-   double im;   /*imaginary part*/
-} complex_t;
-
-hid_t complex_id = H5Tcreate (H5T_COMPOUND, sizeof(complex_t));
-H5Tinsert (complex_id, "real", HOFFSET(complex_t,re),
-           H5T_NATIVE_DOUBLE);
-H5Tinsert (complex_id, "imaginary", HOFFSET(complex_t,im),
-           H5T_NATIVE_DOUBLE);
-	      
-
-
- -

Member alignment is handled by the HOFFSET - macro. However, data stored on disk does not require alignment, - so unaligned versions of compound data structures can be created - to improve space efficiency on disk. These unaligned compound - datatypes can be created by computing offsets by hand to - eliminate inter-member padding, or the members can be packed by - calling H5Tpack() (which modifies a datatype - directly, so it is usually preceded by a call to - H5Tcopy()): - -

-

- - - - - -

Example: A packed struct

-

This example shows how to create a disk version of a - compound datatype in order to store data on disk in - as compact a form as possible. Packed compound - datatypes should generally not be used to describe memory - as they may violate alignment constraints for the - architecture being used. Note also that using a - packed datatype for disk storage may involve a higher - data conversion cost. -

-hid_t complex_disk_id = H5Tcopy (complex_id);
-H5Tpack (complex_disk_id);
-	      
-
-
- - -

-

- - - - - -

Example: A flattened struct

-

Compound datatypes that have a compound datatype - member can be handled two ways. This example shows - that the compound datatype can be flattened, - resulting in a compound type with only atomic - members. - -

-typedef struct {
-   complex_t x;
-   complex_t y;
-} surf_t;
-
-hid_t surf_id = H5Tcreate (H5T_COMPOUND, sizeof(surf_t));
-H5Tinsert (surf_id, "x-re", HOFFSET(surf_t,x.re),
-           H5T_NATIVE_DOUBLE);
-H5Tinsert (surf_id, "x-im", HOFFSET(surf_t,x.im),
-           H5T_NATIVE_DOUBLE);
-H5Tinsert (surf_id, "y-re", HOFFSET(surf_t,y.re),
-           H5T_NATIVE_DOUBLE);
-H5Tinsert (surf_id, "y-im", HOFFSET(surf_t,y.im),
-           H5T_NATIVE_DOUBLE);
-	      
-
-
- -

-

- - - - - -

Example: A nested struct

-

However, when the complex_t is used - often it becomes inconvenient to list its members over - and over again. So the alternative approach to - flattening is to define a compound datatype and then - use it as the type of the compound members, as is done - here (the typedefs are defined in the previous - examples). - -

-hid_t complex_id, surf_id; /*hdf5 datatypes*/
-
-complex_id = H5Tcreate (H5T_COMPOUND, sizeof(complex_t));
-H5Tinsert (complex_id, "re", HOFFSET(complex_t,re),
-           H5T_NATIVE_DOUBLE);
-H5Tinsert (complex_id, "im", HOFFSET(complex_t,im),
-           H5T_NATIVE_DOUBLE);
-
-surf_id = H5Tcreate (H5T_COMPOUND, sizeof(surf_t));
-H5Tinsert (surf_id, "x", HOFFSET(surf_t,x), complex_id);
-H5Tinsert (surf_id, "y", HOFFSET(surf_t,y), complex_id);
-	      
-
-
- - - -   -

8. Enumeration Datatypes

- -

8.1. Introduction

- -

An HDF enumeration datatype is a 1:1 mapping between a set of - symbols and a set of integer values, and an order is imposed on - the symbols by their integer values. The symbols are passed - between the application and library as character strings and all - the values for a particular enumeration type are of the same - integer type, which is not necessarily a native type. - -

8.2. Creation

- -

Creation of an enumeration datatype resembles creation of a - compound datatype: first an empty enumeration type is created, - then members are added to the type, then the type is optionally - locked. - -

-
hid_t H5Tcreate(H5T_class_t type_class, - size_t size) -
This function creates a new empty enumeration datatype based - on a native signed integer type. The first argument is the - constant H5T_ENUM and the second argument is the - size in bytes of the native integer on which the enumeration - type is based. If the architecture does not support a native - signed integer of the specified size then an error is - returned. - -
-/* Based on a native signed short */
-hid_t hdf_en_colors = H5Tcreate(H5T_ENUM, sizeof(short));
- - -
hid_t H5Tenum_create(hid_t base) -
This function creates a new empty enumeration datatype based - on some integer datatype base and is a - generalization of the H5Tcreate() function. This - function is useful when creating an enumeration type based on - some non-native integer datatype, but it can be used for - native types as well. - -
-/* Based on a native unsigned short */
-hid_t hdf_en_colors_1 = H5Tenum_create(H5T_NATIVE_USHORT);
-
-/* Based on a MIPS 16-bit unsigned integer */
-hid_t hdf_en_colors_2 = H5Tenum_create(H5T_MIPS_UINT16);
-
-/* Based on a big-endian 16-bit unsigned integer */
-hid_t hdf_en_colors_3 = H5Tenum_create(H5T_STD_U16BE);
- - -
herr_t H5Tenum_insert(hid_t etype, const char - *symbol, void *value) -
Members are inserted into the enumeration datatype - etype with this function. Each member has a symbolic - name symbol and some integer representation - value. The value argument must point to a value - of the same datatype as specified when the enumeration type - was created. The order of member insertion is not important - but all symbol names and values must be unique within a - particular enumeration type. - -
-short val;
-H5Tenum_insert(hdf_en_colors, "RED",   (val=0,&val));
-H5Tenum_insert(hdf_en_colors, "GREEN", (val=1,&val));
-H5Tenum_insert(hdf_en_colors, "BLUE",  (val=2,&val));
-H5Tenum_insert(hdf_en_colors, "WHITE", (val=3,&val));
-H5Tenum_insert(hdf_en_colors, "BLACK", (val=4,&val));
- - -
herr_t H5Tlock(hid_t etype) -
This function locks a datatype so it cannot be modified or - freed unless the entire HDF5 library is closed. Its use is - completely optional but using it on an application datatype - makes that datatype act like a predefined datatype. - -
-H5Tlock(hdf_en_colors);
- -
- -

8.3. Integer Operations

- -

Because an enumeration datatype is derived from an integer - datatype, any operation which can be performed on integer - datatypes can also be performed on enumeration datatypes. This - includes: - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
H5Topen()H5Tcreate()H5Tcopy()H5Tclose()
H5Tequal()H5Tlock()H5Tcommit()H5Tcommitted()
H5Tget_class()H5Tget_size()H5Tget_order()H5Tget_pad()
H5Tget_precision()H5Tget_offset()H5Tget_sign()H5Tset_size()
H5Tset_order()H5Tset_precision()H5Tset_offset()H5Tset_pad()
H5Tset_sign()
-
- -

In addition, the new function H5Tget_super() will - be defined for all datatypes that are derived from existing - types (currently just enumeration types). - -

-
hid_t H5Tget_super(hid_t type) -
Return the datatype from which type is - derived. When type is an enumeration datatype then - the returned value will be an integer datatype but not - necessarily a native type. One use of this function would be - to create a new enumeration type based on the same underlying - integer type and values but with possibly different symbols. - -
-hid_t itype = H5Tget_super(hdf_en_colors);
-hid_t hdf_fr_colors = H5Tenum_create(itype);
-H5Tclose(itype);
-
-short val;
-H5Tenum_insert(hdf_fr_colors, "rouge", (val=0,&val));
-H5Tenum_insert(hdf_fr_colors, "vert",  (val=1,&val));
-H5Tenum_insert(hdf_fr_colors, "bleu",  (val=2,&val));
-H5Tenum_insert(hdf_fr_colors, "blanc", (val=3,&val));
-H5Tenum_insert(hdf_fr_colors, "noir",  (val=4,&val));
-H5Tlock(hdf_fr_colors);
-
- -

8.4. Type Functions

- -

A small set of functions is available for querying properties - of an enumeration type. These functions are likely to be used - by browsers to display datatype information. - -

-
int H5Tget_nmembers(hid_t etype) -
When given an enumeration datatype etype this - function returns the number of members defined for that - type. This function is already implemented for compound - datatypes. - -

-
char *H5Tget_member_name(hid_t etype, unsigned - membno) -
Given an enumeration datatype etype this function - returns the symbol name for the member indexed by - membno. Members are numbered from zero to - N-1 where N is the return value from - H5Tget_nmembers(). The members are stored in no - particular order. This function is already implemented for - compound datatypes. If an error occurs then the null pointer - is returned. The return value should be freed by calling - free(). - -

-
herr_t H5Tget_member_value(hid_t etype, unsigned - membno, void *value/*out*/) -
Given an enumeration datatype etype this function returns the value associated with the member indexed by membno (as described for H5Tget_member_name()). The value returned is in the domain of the underlying integer datatype, which is often a native integer type. The application should ensure that the memory pointed to by value is large enough to contain the result (the size can be obtained by calling H5Tget_size() on either the enumeration type or the underlying integer type when the exact type is not known to the C compiler).
-int n = H5Tget_nmembers(hdf_en_colors);
-unsigned u;
-for (u=0; u<(unsigned)n; u++) {
-    char *symbol = H5Tget_member_name(hdf_en_colors, u);
-    short val;
-    H5Tget_member_value(hdf_en_colors, u, &val);
-    printf("#%u %20s = %d\n", u, symbol, val);
-    free(symbol);
-}
- -

- Output: -

-#0                BLACK = 4
-#1                 BLUE = 2
-#2                GREEN = 1
-#3                  RED = 0
-#4                WHITE = 3
-
- -

8.5. Data Functions

- -

In addition to querying about the enumeration type properties, - an application may want to make queries about enumerated - data. These functions perform efficient mappings between symbol - names and values. - -

-
herr_t H5Tenum_valueof(hid_t etype, const char - *symbol, void *value/*out*/) -
Given an enumeration datatype etype this function - returns through value the bit pattern associated with - the symbol name symbol. The value argument - should point to memory which is large enough to hold the result, - which is returned as the underlying integer datatype specified - when the enumeration type was created, often a native integer - type. - -

-
herr_t H5Tenum_nameof(hid_t etype, void - *value, char *symbol, size_t - size) -
This function translates a bit pattern pointed to by - value to a symbol name according to the mapping - defined in the enumeration datatype etype and stores - at most size characters of that name (counting the - null terminator) to the symbol buffer. If the name is - longer than the result buffer then the result is not null - terminated and the function returns failure. If value - points to a bit pattern which is not in the domain of the - enumeration type then the first byte of the symbol - buffer is set to zero and the function fails. - -
-short data[1000] = {4, 2, 0, 0, 5, 1, ...};
-int i;
-char symbol[32];
-
-for (i=0; i<1000; i++) {
-    if (H5Tenum_nameof(hdf_en_colors, data+i, symbol,
-                       sizeof symbol)<0) {
-        if (symbol[0]) {
-            strcpy(symbol+sizeof(symbol)-4, "...");
-        } else {
-            strcpy(symbol, "UNKNOWN");
-        }
-    }
-    printf("%d %s\n", data[i], symbol);
-}
- -

- Output: -

-4 BLACK
-2 BLUE
-0 RED
-0 RED
-5 UNKNOWN
-1 GREEN
-...
-
- -

8.6. Conversion

- -

Enumerated data can be converted from one type to another - provided the destination enumeration type contains all the - symbols of the source enumeration type. The conversion operates - by matching up the symbol names of the source and destination - enumeration types to build a mapping from source value to - destination value. For instance, if we are translating from an - enumeration type that defines a sequence of integers as the - values for the colors to a type that defines a different bit for - each color then the mapping might look like this: - -

Enumeration Mapping - -

That is, a source value of 2 which corresponds to - BLUE would be mapped to 0x0004. The - following code snippet builds the second datatype, then - converts a raw data array from one datatype to another, and - then prints the result. - -

-/* Create a new enumeration type */
-short val;
-hid_t bits = H5Tcreate(H5T_ENUM, sizeof val);
-H5Tenum_insert(bits, "RED",   (val=0x0001,&val));
-H5Tenum_insert(bits, "GREEN", (val=0x0002,&val));
-H5Tenum_insert(bits, "BLUE",  (val=0x0004,&val));
-H5Tenum_insert(bits, "WHITE", (val=0x0008,&val));
-H5Tenum_insert(bits, "BLACK", (val=0x0010,&val));
-
-/* The data */
-short data[6] = {1, 4, 2, 0, 3, 5};
-
-/* Convert the data from one type to another */
-H5Tconvert(hdf_en_colors, bits, 6, data, NULL, plist_id);
-
-/* Print the data */
-for (i=0; i<6; i++) {
-    printf("0x%04x\n", (unsigned)(data[i]));
-}
- -

- Output: -

-
-0x0002
-0x0010
-0x0004
-0x0001
-0x0008
-0xffff
- -

If the source data stream contains values which are not in the - domain of the conversion map then an overflow exception is - raised within the library, causing the application defined - overflow handler to be invoked (see - H5Tset_overflow()). If no overflow handler is - defined then all bits of the destination value will be set. - -
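A hedged sketch of an overflow handler that simply counts overflows and then lets the library apply its default action; the H5T_overflow_t callback signature shown here should be checked against H5Tpublic.h for the release in use:

static unsigned long overflow_count = 0;

static herr_t
count_overflows(hid_t src_id, hid_t dst_id, void *src_buf, void *dst_buf)
{
    overflow_count++;
    return -1;   /* negative: fall back to the library's default action */
}

/* register the handler before converting */
H5Tset_overflow(count_overflows);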

The HDF library will not provide conversions between enumerated - data and integers although the application is free to do so - (this is a policy we apply to all classes of HDF datatypes). - However, since enumeration types are derived from - integer types it is permissible to treat enumerated data as - integers and perform integer conversions in that context. - -

8.7. Symbol Order

- -

Symbol order is determined by the integer values associated - with each symbol. When the integer datatype is a native type, - testing the relative order of two symbols is an easy process: - simply compare the values of the symbols. If only the symbol - names are available then the values must first be determined by - calling H5Tenum_valueof(). - -

-short val1, val2;
-H5Tenum_valueof(hdf_en_colors, "WHITE", &val1);
-H5Tenum_valueof(hdf_en_colors, "BLACK", &val2);
-if (val1 < val2) ...
- -

When the underlying integer datatype is not a native type then - the easiest way to compare symbols is to first create a similar - enumeration type that contains all the same symbols but has a - native integer type (HDF type conversion features can be used to - convert the non-native values to native values). Once we have a - native type we can compare symbol order as just described. If - foreign is some non-native enumeration type then a - native type can be created as follows: - -

-int n = H5Tget_nmembers(foreign);
-hid_t itype = H5Tget_super(foreign);
-void *val = malloc(n * MAX(H5Tget_size(itype), sizeof(int)));
-char **name = malloc(n * sizeof(char*));
-unsigned u;
-int i;
-
-/* Get foreign type information */
-for (u=0; u<(unsigned)n; u++) {
-    name[u] = H5Tget_member_name(foreign, u);
-    H5Tget_member_value(foreign, u,
-                        (char*)val+u*H5Tget_size(foreign));
-}
-
-/* Convert integer values to new type */
-H5Tconvert(itype, H5T_NATIVE_INT, n, val, NULL, plist_id);
-
-/* Build a native type */
-hid_t native = H5Tenum_create(H5T_NATIVE_INT);
-for (i=0; i<n; i++) {
-    H5Tenum_insert(native, name[i], (int*)val+i);
-    free(name[i]);
-}
-free(name);
-free(val);
- -

It is also possible to convert enumerated data to a new type - that has a different order defined for the symbols. For - instance, we can define a new type, reverse that - defines the same five colors but in the reverse order. - -

-short val;
-int i;
-char sym[8];
-short data[5] = {0, 1, 2, 3, 4};
-
-hid_t reverse = H5Tenum_create(H5T_NATIVE_SHORT);
-H5Tenum_insert(reverse, "BLACK", (val=0,&val));
-H5Tenum_insert(reverse, "WHITE", (val=1,&val));
-H5Tenum_insert(reverse, "BLUE",  (val=2,&val));
-H5Tenum_insert(reverse, "GREEN", (val=3,&val));
-H5Tenum_insert(reverse, "RED",   (val=4,&val));
-
-/* Print data */
-for (i=0; i<5; i++) {
-    H5Tenum_nameof(hdf_en_colors, data+i, sym, sizeof sym);
-    printf ("%d %s\n", data[i], sym);
-}
-
-puts("Converting...");
-H5Tconvert(hdf_en_colors, reverse, 5, data, NULL, plist_id);
-
-/* Print data */
-for (i=0; i<5; i++) {
-    H5Tenum_nameof(reverse, data+i, sym, sizeof sym);
-    printf ("%d %s\n", data[i], sym);
-}
- -

- Output: -

-0 RED
-1 GREEN
-2 BLUE
-3 WHITE
-4 BLACK
-Converting...
-4 RED
-3 GREEN
-2 BLUE
-1 WHITE
-0 BLACK
- -

8.8. Equality

- -

The order that members are inserted into an enumeration type is - unimportant; the important part is the associations between the - symbol names and the values. Thus, two enumeration datatypes - will be considered equal if and only if both types have the same - symbol/value associations and both have equal underlying integer - datatypes. Type equality is tested with the - H5Tequal() function. - -
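For example (a hedged sketch, not part of the original text), the following two types compare equal even though their members were inserted in different orders:

short v;
hid_t a = H5Tenum_create(H5T_NATIVE_SHORT);
hid_t b = H5Tenum_create(H5T_NATIVE_SHORT);
H5Tenum_insert(a, "OFF", (v=0,&v));
H5Tenum_insert(a, "ON",  (v=1,&v));
H5Tenum_insert(b, "ON",  (v=1,&v));
H5Tenum_insert(b, "OFF", (v=0,&v));
if (H5Tequal(a, b) > 0) printf("the two enumeration types are equal\n");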

8.9. Interacting with C's enum Type

- -

Although HDF enumeration datatypes are similar to C - enum datatypes, there are some important - differences: - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DifferenceMotivation/Implications
Symbols are unquoted in C but quoted in - HDF.This allows the application to manipulate - symbol names in ways that are not possible with C.
The C compiler automatically replaces all - symbols with their integer values but HDF requires - explicit calls to do the same.C resolves symbols at compile time while - HDF resolves symbols at run time.
The mapping from symbols to integers is - N:1 in C but 1:1 in HDF.HDF can translate from value to name - uniquely and large switch statements are - not necessary to print values in human-readable - format.
A symbol must appear in only one C - enum type but may appear in multiple HDF - enumeration types.The translation from symbol to value in HDF - requires the datatype to be specified while in C the - datatype is not necessary because it can be inferred - from the symbol.
The underlying integer value is always a - native integer in C but can be a foreign integer type in - HDF.This allows HDF to describe data that might - reside on a foreign architecture, such as data stored in - a file.
The sign and size of the underlying integer - datatype is chosen automatically by the C compiler but - must be fully specified with HDF.Since HDF doesn't require finalization of a - datatype, complete specification of the type must be - supplied before the type is used. Requiring that - information at the time of type creation was a design - decision to simplify the library.
-
- -

The examples below use the following C datatypes: - -

- - - - -
-
-/* English color names */
-typedef enum {
-    RED,
-    GREEN,
-    BLUE,
-    WHITE,
-    BLACK
-} c_en_colors;
-
-/* Spanish color names, reverse order */
-typedef enum {
-    NEGRO,
-    BLANCO,
-    AZUL,
-    VERDE,
-    ROJO
-} c_sp_colors;
-
-/* No enum definition for French names */
-	    
-
- -

Creating HDF Types from C Types

- -

An HDF enumeration datatype can be created from a C - enum type simply by passing pointers to the C - enum values to H5Tenum_insert(). For - instance, to create HDF types for the c_en_colors - type shown above: - -

- - - - -
-
-
-c_en_colors val;
-hid_t hdf_en_colors = H5Tcreate(H5T_ENUM, sizeof(c_en_colors));
-H5Tenum_insert(hdf_en_colors, "RED",   (val=RED,  &val));
-H5Tenum_insert(hdf_en_colors, "GREEN", (val=GREEN,&val));
-H5Tenum_insert(hdf_en_colors, "BLUE",  (val=BLUE, &val));
-H5Tenum_insert(hdf_en_colors, "WHITE", (val=WHITE,&val));
-H5Tenum_insert(hdf_en_colors, "BLACK", (val=BLACK,&val));
-
- -

Name Changes between Applications

- -

Occasionally two applications wish to exchange data but use different names for the constants they exchange. For instance, an English and a Spanish program may want to communicate color names although they use different symbols in the C enum definitions. The communication is still possible, although the applications must agree on common terms for the colors. The following example shows the Spanish code to read the values, assuming that the applications have agreed that the color information will be exchanged using English color names:

- - - - -
-
-
-c_sp_colors val, data[1000];
-hid_t hdf_sp_colors = H5Tcreate(H5T_ENUM, sizeof(c_sp_colors));
-H5Tenum_insert(hdf_sp_colors, "RED",   (val=ROJO,   &val));
-H5Tenum_insert(hdf_sp_colors, "GREEN", (val=VERDE,  &val));
-H5Tenum_insert(hdf_sp_colors, "BLUE",  (val=AZUL,   &val));
-H5Tenum_insert(hdf_sp_colors, "WHITE", (val=BLANCO, &val));
-H5Tenum_insert(hdf_sp_colors, "BLACK", (val=NEGRO,  &val));
-
-H5Dread(dataset, hdf_sp_colors, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
-
- - -

Symbol Ordering across Applications

- -

Since symbol ordering is completely determined by the integer values - assigned to each symbol in the enum definition, - ordering of enum symbols cannot be preserved across - files like with HDF enumeration types. HDF can convert from one - application's integer values to the other's so a symbol in one - application's C enum gets mapped to the same symbol - in the other application's C enum, but the relative - order of the symbols is not preserved. - -

For example, an application may be defined to use the - definition of c_en_colors defined above where - WHITE is less than BLACK, but some - other application might define the colors in some other - order. If each application defines an HDF enumeration type based - on that application's C enum type then HDF will - modify the integer values as data is communicated from one - application to the other so that a RED value - in the first application is also a RED value in the - other application. - -

A case of this reordering of symbol names was also shown in the - previous code snippet (as well as a change of language), where - HDF changed the integer values so 0 (RED) in the - input file became 4 (ROJO) in the data - array. In the input file, WHITE was less than - BLACK; in the application the opposite is true. - -

In fact, the ability to change the order of symbols is often - convenient when the enumeration type is used only to group - related symbols that don't have any well defined order - relationship. - -

Internationalization

- -

The HDF enumeration type conversion features can also be used - to provide internationalization of debugging output. A program - written with the c_en_colors datatype could define - a separate HDF datatype for languages such as English, Spanish, - and French and cast the enumerated value to one of these HDF - types to print the result. - -

- - - - -
-
-
-c_en_colors val, *data=...;
-
-hid_t hdf_sp_colors = H5Tcreate(H5T_ENUM, sizeof val);
-H5Tenum_insert(hdf_sp_colors, "ROJO",   (val=RED,   &val));
-H5Tenum_insert(hdf_sp_colors, "VERDE",  (val=GREEN, &val));
-H5Tenum_insert(hdf_sp_colors, "AZUL",   (val=BLUE,  &val));
-H5Tenum_insert(hdf_sp_colors, "BLANCO", (val=WHITE, &val));
-H5Tenum_insert(hdf_sp_colors, "NEGRO",  (val=BLACK, &val));
-
-hid_t hdf_fr_colors = H5Tcreate(H5T_ENUM, sizeof val);
-H5Tenum_insert(hdf_fr_colors, "ROUGE", (val=RED,   &val));
-H5Tenum_insert(hdf_fr_colors, "VERT",  (val=GREEN, &val));
-H5Tenum_insert(hdf_fr_colors, "BLEU",  (val=BLUE,  &val));
-H5Tenum_insert(hdf_fr_colors, "BLANC", (val=WHITE, &val));
-H5Tenum_insert(hdf_fr_colors, "NOIR",  (val=BLACK, &val));
-
-void
-nameof(lang_t language, c_en_colors val, char *name, size_t size)
-{
-    switch (language) {
-    case ENGLISH:
-        H5Tenum_nameof(hdf_en_colors, &val, name, size);
-        break;
-    case SPANISH:
-        H5Tenum_nameof(hdf_sp_colors, &val, name, size);
-        break;
-    case FRENCH:
-        H5Tenum_nameof(hdf_fr_colors, &val, name, size);
-        break;
-    }
-}
-
- -

8.10. Goals That Have Been Met

- -

The main goal of enumeration types is to provide communication - of enumerated data using symbolic equivalence. That is, a - symbol written to a dataset by one application should be read as - the same symbol by some other application. - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Architecture IndependenceTwo applications shall be able to exchange - enumerated data even when the underlying integer values - have different storage formats. HDF accomplishes this for - enumeration types by building them upon integer types.
Preservation of Order RelationshipThe relative order of symbols shall be - preserved between two applications that use equivalent - enumeration datatypes. Unlike numeric values that have - an implicit ordering, enumerated data has an explicit - order defined by the enumeration datatype and HDF - records this order in the file.
Order IndependenceAn application shall be able to change the - relative ordering of the symbols in an enumeration - datatype. This is accomplished by defining a new type with - different integer values and converting data from one type - to the other.
SubsetsAn application shall be able to read - enumerated data from an archived dataset even after the - application has defined additional members for the - enumeration type. An application shall be able to write - to a dataset when the dataset contains a superset of the - members defined by the application. Similar rules apply - for in-core conversions between enumerated datatypes.
TargetableAn application shall be able to target a - particular architecture or application when storing - enumerated data. This is accomplished by allowing - non-native underlying integer types and converting the - native data to non-native data.
Efficient Data TransferAn application that defines a file dataset - that corresponds to some native C enumerated data array - shall be able to read and write to that dataset directly - using only Posix read and write functions. HDF already - optimizes this case for integers, so the same optimization - will apply to enumerated data. -
Efficient StorageEnumerated data shall be stored in a manner - which is space efficient. HDF stores the enumerated data - as integers and allows the application to chose the size - and format of those integers.
- - - - - - - -

9. Variable-length Datatypes

- -

9.1. Overview And Justification

- -Variable-length (VL) datatypes are sequences of an existing datatype -(atomic, VL, or compound) which are not fixed in length from one dataset location -to another. In essence, they are similar to C character strings -- a sequence of -a type which is pointed to by a particular type of pointer -- although -they are implemented more closely to FORTRAN strings by including an explicit -length in the pointer instead of using a particular value to terminate the -sequence. - -

-VL datatypes are useful to the scientific community in many different ways, -some of which are listed below: -

    -
  • Ragged arrays: Multi-dimensional ragged arrays can be implemented with - the last (fastest changing) dimension being ragged by using a - VL datatype as the type of the element stored. (Or as a field in a - compound datatype.) -
  • Fractal arrays: If a compound datatype has a VL field of another compound - type with VL fields (a nested VL datatype), this can be used to - implement ragged arrays of ragged arrays, to whatever nesting depth is - required for the user. -
  • Polygon lists: A common storage requirement is to efficiently store arrays - of polygons with different numbers of vertices. VL datatypes can be - used to efficiently and succinctly describe an array of polygons with - different numbers of vertices. -
  • Character strings: Perhaps the most common use of VL datatypes will be to - store C-like VL character strings in dataset elements or as attributes - of objects. -
  • Indices: An array of VL object references could be used as an index to - all the objects in a file which contain a particular sequence of - dataset values. Perhaps an array something like the following: -
    -            Value1: Object1, Object3,  Object9
    -            Value2: Object0, Object12, Object14, Object21, Object22
    -            Value3: Object2
    -            Value4: <none>
    -            Value5: Object1, Object10, Object12
    -                .
    -                .
    -        
    -
  • Object Tracking: An array of VL dataset region references can be used as - a method of tracking objects or features appearing in a sequence of - datasets. Perhaps an array of them would look like: -
    -            Feature1: Dataset1:Region,  Dataset3:Region,  Dataset9:Region
    -            Feature2: Dataset0:Region,  Dataset12:Region, Dataset14:Region,
    -                      Dataset21:Region, Dataset22:Region
    -            Feature3: Dataset2:Region
    -            Feature4: <none>
    -            Feature5: Dataset1:Region,  Dataset10:Region, Dataset12:Region
    -                .
    -                .
    -        
    -
- - -

9.2. Variable-length Datatype Memory Management

- -With each element possibly being of different sequence lengths for a -dataset with a VL datatype, the memory for the VL datatype must be dynamically -allocated. Currently there are two methods of managing the memory for -VL datatypes: the standard C malloc/free memory allocation routines or a method -of calling user-defined memory management routines to allocate or free memory. -Since the memory allocated when reading (or writing) may be complicated to -release, an HDF5 routine is provided to traverse a memory buffer and free the -VL datatype information without leaking memory. - - -

Variable-length datatypes cannot be divided

- -VL datatypes are designed so that they cannot be subdivided by the library -with selections, etc. This design was chosen due to the complexities in -specifying selections on each VL element of a dataset through a selection API -that is easy to understand. Also, the selection APIs work on dataspaces, not -on datatypes. At some point in time, we may want to create a way for -dataspaces to have VL components to them and we would need to allow selections -of those VL regions, but that is beyond the scope of this document. - - -

What happens if the library runs out of memory while reading?

- -It is possible for a call to H5Dread to fail while reading in -VL datatype information if the memory required exceeds that which is available. -In this case, the H5Dread call will fail gracefully and any -VL data which has been allocated prior to the memory shortage will be returned -to the system via the memory management routines detailed below. -It may be possible to design a partial read API function at a -later date, if demand for such a function warrants. - - -

Strings as variable-length datatypes

- -Since character strings are a special case of VL data that is implemented -in many different ways on different machines and in different programming -languages, they are handled somewhat differently from other VL datatypes in HDF5. - -

-HDF5 has native VL strings for each language API, which are stored the -same way on disk, but are exported through each language API in a natural way -for that language. When retrieving VL strings from a dataset, users may choose -to have them stored in memory as a native VL string or in HDF5's hvl_t -struct for VL datatypes. - -

-VL strings may be created in one of two ways: by creating a VL datatype with -a base type of H5T_NATIVE_ASCII, H5T_NATIVE_UNICODE, -etc., or by creating a string datatype and setting its length to -H5T_VARIABLE. The second method is used to access -native VL strings in memory. The library will convert between the two types, -but they are stored on disk using different datatypes and have different -memory representations. - -
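A hedged sketch of the second method, which yields the native C-style VL string type whose elements are read and written as ordinary char* pointers (the variable name is arbitrary):

hid_t vlstr = H5Tcopy(H5T_C_S1);     /* start from the one-byte C string type */
H5Tset_size(vlstr, H5T_VARIABLE);    /* make its length variable              */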

-Multi-byte character representations, such as UNICODE or wide -characters in C/C++, will need the appropriate character and string datatypes -created so that they can be described properly through the datatype API. -Additional conversions between these types and the current ASCII characters -will also be required. - -

-Variable-width character strings (which might be compressed data or some -other encoding) are not currently handled by this design. We will evaluate -how to implement them based on user feedback. - - -

9.3. Variable-length Datatype API

- -

Creation

- -VL datatypes are created with the H5Tvlen_create() function -as follows: -
-
type_id = H5Tvlen_create(hid_t base_type_id); -
- -

-The base datatype will be the datatype that the sequence is composed of, -characters for character strings, vertex coordinates for polygon lists, etc. -The base datatype specified for the VL datatype can be of any HDF5 datatype, -including another VL datatype, a compound datatype, or an atomic datatype. - - -
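For instance (a hedged sketch), a ragged sequence of native unsigned integers, such as one list of vertex indices per dataset element, could be described as:

hid_t vl_uint = H5Tvlen_create(H5T_NATIVE_UINT);  /* sequence of native unsigned ints */
/* ... use vl_uint as a dataset, attribute, or compound-member datatype ... */
H5Tclose(vl_uint);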

Query base datatype of VL datatype

- -It may be necessary to know the base datatype of a VL datatype before -memory is allocated, etc. The base datatype is queried with the -H5Tget_super() function, described in the H5T documentation. - - -

Query minimum memory required for VL information

- -In order to predict the memory usage that H5Dread may need to allocate to store VL data while reading the data, the H5Dget_vlen_buf_size() function is provided:
-
herr_t - H5Dget_vlen_buf_size(hid_t dataset_id, - hid_t type_id, - hid_t space_id, - hsize_t *size) -
- (This function is not implemented in Release 1.2.) - -

-This routine checks the number of bytes required to store the VL data from -the dataset, using the space_id for the selection in the dataset -on disk and the type_id for the memory representation of the -VL data in memory. The *size value is modified according to -how many bytes are required to store the VL data in memory. - - -
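A hedged usage sketch, assuming dataset and tid are open identifiers and the entire file dataspace is selected (the function name follows the prototype above; it is not available in Release 1.2):

hsize_t nbytes;
hid_t   fspace = H5Dget_space(dataset);              /* selection: the whole dataset */
H5Dget_vlen_buf_size(dataset, tid, fspace, &nbytes);
printf("reading the VL data requires %lu bytes\n", (unsigned long)nbytes);
H5Sclose(fspace);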

Specifying how to manage memory for the VL datatype

- -The memory management method is determined by dataset transfer properties -passed into the H5Dread and H5Dwrite functions -with the dataset transfer property list. - -

-Default memory management is set by using H5P_DEFAULT -for the dataset transfer property list identifier. -If H5P_DEFAULT is used with H5Dread, -the system malloc and free calls -will be used for allocating and freeing memory. -In such a case, H5P_DEFAULT should also be passed -as the property list identifier to H5Dvlen_reclaim. - -

-The rest of this subsection is relevant only to those who choose -not to use default memory management. - -

-The user can choose whether to use the -system malloc and free calls or -user-defined, or custom, memory management functions. -If user-defined memory management functions are to be used, -the memory allocation and free routines must be defined via -H5Pset_vlen_mem_manager(), as follows: -

-
herr_t - H5Pset_vlen_mem_manager(hid_t plist_id, - H5MM_allocate_t alloc, - void *alloc_info, - H5MM_free_t free, - void *free_info) -
- - -

-The alloc and free parameters -identify the memory management routines to be used. -If the user has defined custom memory management routines, -alloc and/or free should be set to make -those routine calls (i.e., the name of the routine is used as -the value of the parameter); -if the user prefers to use the system's malloc -and/or free, the alloc and -free parameters, respectively, should be set to - NULL -

-The prototypes for the user-defined functions would appear as follows: -

-
typedef void - *(*H5MM_allocate_t)(size_t size, - void *info) ; -
typedef void - (*H5MM_free_t)(void *mem, - void *free_info) ; -
- -

-The alloc_info and free_info parameters can be -used to pass along any required information to the user's memory management -routines. - -

-In summary, if the user has defined custom memory management -routines, the name(s) of the routines are passed in the -alloc and free parameters and the -custom routines' parameters are passed in the -alloc_info and free_info parameters. -If the user wishes to use the system malloc and -free functions, the alloc and/or -free parameters are set to NULL -and the alloc_info and free_info -parameters are ignored. - -

Recovering memory from VL buffers read in

- -The complex memory buffers created for a VL datatype may be reclaimed with -the H5Dvlen_reclaim() function call, as follows: -
-
herr_t - H5Dvlen_reclaim(hid_t type_id, - hid_t space_id, - hid_t plist_id, - void *buf); -
- -

-The type_id must be the datatype stored in the buffer, -space_id describes the selection for the memory buffer -to free the VL datatypes within, -plist_id is the dataset transfer property list which -was used for the I/O transfer to create the buffer, and -buf is the pointer to the buffer to free the VL memory within. -The VL structures (hvl_t) in the user's buffer are -modified to zero out the VL information after it has been freed. - -

-If nested VL datatypes were used to create the buffer, -this routine frees them from the bottom up, -releasing all the memory without creating memory leaks. - - -

9.4. Code Examples

- -The following example creates a one-dimensional array of size 4 whose elements are of a variable-length datatype:
-          0 10 20 30
-            11 21 31
-               22 32
-                  33
-
-Each element of the VL datatype is of H5T_NATIVE_UINT type. -

-The array is stored in the dataset and then read back into memory. -Default memory management routines are used for writing the VL data. -Custom memory management routines are used for reading the VL data and -reclaiming memory space. - -

- - - - - -

Example: Variable-length Datatypes

-
-#include <hdf5.h>
-
-#define FILE   "vltypes.h5"
-#define MAX(X,Y)        ((X)>(Y)?(X):(Y))
-
-/* 1-D dataset with fixed dimensions */
-#define SPACE_NAME  "Space"
-#define SPACE_RANK	1
-#define SPACE_DIM	4
-
-void *vltypes_alloc_custom(size_t size, void *info);
-void vltypes_free_custom(void *mem, void *info);
-
-/****************************************************************
-**
-**  vltypes_alloc_custom():  VL datatype custom memory
-**      allocation routine.  This routine just uses malloc to
-**      allocate the memory and increments the amount of memory
-**      allocated.
-** 
-****************************************************************/
-void *vltypes_alloc_custom(size_t size, void *info)
-{
-
-    void *ret_value=NULL;       /* Pointer to return */
-    int *mem_used=(int *)info;  /* Get the pointer to the memory used */
-    size_t extra;               /* Extra space needed */
-
-    /*
-     *  This weird contortion is required on the DEC Alpha to keep the
-     *  alignment correct.
-     */
-    extra=MAX(sizeof(void *),sizeof(int));
-
-    if((ret_value=(void *)malloc(extra+size))!=NULL) {
-        *(int *)ret_value=size;
-        *mem_used+=size;
-    } /* end if */
-    ret_value=((unsigned char *)ret_value)+extra;
-    return(ret_value);
-}
-/******************************************************************
-**  vltypes_free_custom(): VL datatype custom memory
-**      free routine.  This routine just uses free to
-**      release the memory and decrements the amount of memory
-**      allocated.
-** ****************************************************************/
-void vltypes_free_custom(void *_mem, void *info)
-
-{  
-    unsigned char *mem;
-    int *mem_used=(int *)info;  /* Get the pointer to the memory used */
-    size_t extra;               /* Extra space needed */    
-    /*
-     *  This weird contortion is required on the DEC Alpha to keep the
-     *  alignment correct.      
-     */ 
-    extra=MAX(sizeof(void *),sizeof(int));
-    if(_mem!=NULL) {        
-        mem=((unsigned char *)_mem)-extra;
-       *mem_used-=*(int *)mem; 
-       free(mem); 
-    } /* end if */
-}
-
-int main(void)
-
-{   
-    hvl_t wdata[SPACE_DIM];   /* Information to write */
-    hvl_t rdata[SPACE_DIM];   /* Information read in */
-    hid_t		fid;	   /* HDF5 File IDs */  
-    hid_t		dataset;   /* Dataset ID */
-    hid_t		sid;       /* Dataspace ID */
-    hid_t		tid;       /* Datatype ID	   	 */
-    hid_t       xfer_pid;   /* Dataset transfer property list ID */
-    hsize_t		dims[] = {SPACE_DIM};
-    unsigned   i,j;        /* counting variables */
-    int         mem_used=0; /* Memory used during allocation */
-    herr_t		ret;		/* Generic return value	 */
-
-    /*
-     * Allocate and initialize VL data to write 
-     */
-    for(i=0; i<SPACE_DIM; i++) {
-
-        wdata[i].p= (unsigned int *)malloc((i+1)*sizeof(unsigned int));
-        wdata[i].len=i+1;
-        for(j=0; j<(i+1); j++)
-            ((unsigned int *)wdata[i].p)[j]=i*10+j;
-    } /* end for */
-
-    /* 
-     * Create file. 
-     */
-    fid = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /* 
-     * Create dataspace for datasets. 
-     */
-    sid = H5Screate_simple(SPACE_RANK, dims, NULL);
-
-    /* 
-     * Create a datatype to refer to. 
-     */
-    tid = H5Tvlen_create (H5T_NATIVE_UINT);
-
-    /* 
-     * Create a dataset. 
-     */
-    dataset=H5Dcreate(fid, "Dataset", tid, sid, H5P_DEFAULT);
-
-    /* 
-     * Write dataset to disk. 
-     */
-    ret=H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
-
-    /* 
-     * Change to the custom memory allocation routines for reading 
-     * VL data 
-     */
-    xfer_pid=H5Pcreate(H5P_DATASET_XFER);
-
-    ret=H5Pset_vlen_mem_manager(xfer_pid, vltypes_alloc_custom,
-                                &mem_used, vltypes_free_custom, 
-                                &mem_used);
-
-    /* 
-     * Read dataset from disk. vltypes_alloc_custom and
-     * vltypes_free_custom will be used to manage memory.
-     */
-    ret=H5Dread(dataset, tid, H5S_ALL, H5S_ALL, xfer_pid, rdata);   
-
-    /* 
-     * Display data read in 
-     */
-    for(i=0; i<SPACE_DIM; i++) {
-        printf("%d-th element length is %d \n", i, 
-                                   (unsigned) rdata[i].len);
-        for(j=0; j<rdata[i].len; j++) {
-            printf(" %d ",((unsigned int *)rdata[i].p)[j] );   
-        } 
-        printf("\n"); 
-    } /* end for */
-
-    /* 
-     * Reclaim the read VL data. vltypes_free_custom will be used 
-     * to reclaim the space. 
-     */
-    ret=H5Dvlen_reclaim(tid, sid, xfer_pid, rdata);
-
-    /* 
-     * Reclaim the write VL data.  C language free function will be 
-     * used to reclaim space. 
-     */
-    ret=H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, wdata);
-
-    /* 
-     * Close Dataset 
-     */
-    ret = H5Dclose(dataset);
-
-    /* 
-     * Close datatype 
-     */
-    ret = H5Tclose(tid);
-
-    /* 
-     * Close disk dataspace 
-     */
-    ret = H5Sclose(sid);
-    
-    /* 
-     * Close dataset transfer property list 
-     */
-    ret = H5Pclose(xfer_pid);
-    
-    /* 
-     * Close file 
-     */
-    ret = H5Fclose(fid);
-
-} 
-      
-
-
- -And the output from this sample code would be as follows: - -
- - - - - -

Example: Variable-length Datatypes, Sample Output

-
-0-th element length is 1 
-0 
-1-th element length is 2 
-10  11 
-2-th element length is 3 
-20  21  22 
-3-th element length is 4 
-30  31  32  33 
-      
-
-
- -

-For further samples of VL datatype code, see the tests in test/tvltypes.c -in the HDF5 distribution. - - - - -

10. Array Datatypes

- -The array class of datatypes, H5T_ARRAY, allows the -construction of true, homogeneous, multi-dimensional arrays. -Since these are homogeneous arrays, each element of the array will be -of the same datatype, designated at the time the array is created. - -

-Arrays can be nested. -Not only is an array datatype used as an element of an HDF5 dataset, -but the elements of an array datatype may be of any datatype, -including another array datatype. - -

-Array datatypes cannot be subdivided for I/O; the entire array must -be transferred from one dataset to another. - -

-Within the limitations outlined in the next paragraph, array datatypes -may be N-dimensional and of any dimension size. -Unlimited dimensions, however, are not supported. -Functionality similar to unlimited dimension arrays is available through -the use of variable-length datatypes. - -

-The maximum number of dimensions, i.e., the maximum rank, of an array -datatype is specified by the HDF5 library constant H5S_MAX_RANK. -The minimum rank is 1 (one). -All dimension sizes must be greater than 0 (zero). - -

-One array datatype may only be converted to another array datatype if the number of dimensions and the sizes of the dimensions are equal and if the datatype of the first array's elements can be converted to the datatype of the second array's elements.

10.1 Array Datatype APIs

- -The functions for creating and manipulating array datatypes are as follows: - - - - - - - -
H5Tarray_create -    - Creates an array datatype. -
- hid_t H5Tarray_create( - hid_t base, - int rank, - const hsize_t dims[/*rank*/], - const int perm[/*rank*/] - ) - -
H5Tget_array_ndims -    - Retrieves the rank of the array datatype. -
- int H5Tget_array_ndims( - hid_t adtype_id - ) - -
H5Tget_array_dims -    - Retrieves the dimension sizes of the array datatype. -
- int H5Tget_array_dims( - hid_t adtype_id, - hsize_t *dims[], - int *perm[] - ) - -
-
- - -

10.2 Transition Issues in Adapting Existing Software
-       -(Transition to HDF5 Release 1.4 Only)

- -The array datatype class is new with Release 1.4; -prior releases included an array element for compound datatypes. -

-The use of the array datatype class will not interfere with the -use of existing compound datatypes. Applications may continue to -read and write the older field arrays, but they will no longer be -able to create array fields in newly-defined compound datatypes. -

-Existing array fields will be transparently mapped to array datatypes -when they are read in. - - -

10.3 Code Example

- -The following example creates an array datatype and a dataset -containing elements of the array datatype in an HDF5 file. -It then writes the dataset to the file. -

- -

- - - - - -

Example: Array Datatype

-
-#include <hdf5.h>
-
-#define FILE        "SDS_array_type.h5"
-#define DATASETNAME "IntArray" 
-#define ARRAY_DIM1     5                      /* array dimensions and rank */
-#define ARRAY_DIM2     4 
-#define ARRAY_RANK     2 
-#define SPACE_DIM     10                      /* dataset dimensions and rank */ 
-#define RANK  1 
-
-int
-main (void)
-{
-    hid_t       file, dataset;         /* file and dataset handles */
-    hid_t       datatype, dataspace;   /* handles */
-    hsize_t     sdims[] = {SPACE_DIM};              /* dataset dimensions */
-    hsize_t     adims[] = {ARRAY_DIM1, ARRAY_DIM2}; /* array dimensions */
-    hsize_t     adims_out[2]; 
-    herr_t      status;                             
-    int         data[SPACE_DIM][ARRAY_DIM1][ARRAY_DIM2];   /* data to write */
-    int         k, i, j;
-    int         array_rank_out; 
-
-    /* 
-     * Data  and output buffer initialization. 
-     */
-    for (k = 0; k < SPACE_DIM; k++) {
-      for (j = 0; j < ARRAY_DIM1; j++) {
-	for (i = 0; i < ARRAY_DIM2; i++)
-               data[k][j][i] = k;
-      }
-    }     
-    /*
-     * Create a new file using H5F_ACC_TRUNC access,
-     * default file creation properties, and default file
-     * access properties.
-     */
-    file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /*
-     * Describe the size of the array and create the data space for fixed
-     * size dataset. 
-     */
-    dataspace = H5Screate_simple(RANK, sdims, NULL); 
-
-    /* 
-     * Define array datatype for the data in the file.
-     */
-    datatype = H5Tarray_create(H5T_NATIVE_INT, ARRAY_RANK, adims, NULL);
-
-    /*
-     * Create a new dataset within the file using defined dataspace and
-     * datatype and default dataset creation properties.
-     */
-    dataset = H5Dcreate(file, DATASETNAME, datatype, dataspace,
-			H5P_DEFAULT);
-
-    /*
-     * Write the data to the dataset using default transfer properties.
-     */
-    status = H5Dwrite(dataset, datatype, H5S_ALL, H5S_ALL,
-		      H5P_DEFAULT, data);
-
-
-    /*
-     * Close/release resources.
-     */
-    H5Sclose(dataspace);
-    H5Tclose(datatype);
-    H5Dclose(dataset);
-    /*
-     * Reopen dataset, and return information about its datatype.
-     */
-    dataset = H5Dopen(file, DATASETNAME);
-    datatype = H5Dget_type(dataset);
-    array_rank_out = H5Tget_array_ndims(datatype);
-    status = H5Tget_array_dims(datatype, adims_out, NULL); 
-    printf(" Array datatype rank is %d \n", array_rank_out);
-    printf(" Array dimensions are %d x %d \n", (int)adims_out[0],   
-                                               (int)adims_out[1]);
-
-    H5Tclose(datatype);
-    H5Dclose(dataset);
-    H5Fclose(file);
- 
-    return 0;
-}     
-      
-
-
- - - -

11. Sharing Datatypes among Datasets

- -

If a file has many datasets that share a common datatype, - then the file could be made smaller by having all the datasets - share a single datatype. Instead of storing a copy of the - datatype in each dataset object header, a single datatype is stored - and the object headers point to it. The space savings are - probably significant only for datasets with a compound datatype, - since the atomic datatypes can be described with just a few - bytes anyway. - -

To create a number of datasets that share a single datatype, - create the datasets with a committed (named) datatype. - -

-

- - - - - -

Example: Shared Datatypes

-

To create two datasets that share a common datatype - one just commits the datatype, giving it a name, and - then uses that datatype to create the datasets. - -

-hid_t t1 = ...some transient type...;
-H5Tcommit (file, "shared_type", t1);
-hid_t dset1 = H5Dcreate (file, "dset1", t1, space, H5P_DEFAULT);
-hid_t dset2 = H5Dcreate (file, "dset2", t1, space, H5P_DEFAULT);
-	      
- -

And to create two additional datasets later which - share the same type as the first two datasets: - -

-hid_t dset1 = H5Dopen (file, "dset1");
-hid_t t2 = H5Dget_type (dset1);
-hid_t dset3 = H5Dcreate (file, "dset3", t2, space, H5P_DEFAULT);
-hid_t dset4 = H5Dcreate (file, "dset4", t2, space, H5P_DEFAULT);
-	      
-
-
- - - - - -

12. Data Conversion

-
- -

The library is capable of converting data from one type to - another and does so automatically when reading or writing the - raw data of a dataset, attribute data, or fill values. The - application can also change the type of data stored in an array. - -

In order to ensure that data conversion exceeds disk I/O rates, - common data conversion paths can be hand-tuned and optimized for - performance. The library contains very efficient code for - conversions between most native datatypes and a few non-native - datatypes, but if a hand-tuned conversion function is not - available, then the library falls back to a slower but more - general conversion function. The application programmer can - define additional conversion functions when the library's - repertoire is insufficient. In fact, if an application does - define a conversion function which would be of general interest, - we request that the function be submitted to the HDF5 - development team for inclusion in the library. - -

Note: The HDF5 library contains a deliberately limited - set of conversion routines. It can convert from one integer - format to another, from one floating point format to another, - and from one struct to another. It can also perform byte - swapping when the source and destination types are otherwise the - same. The library does not contain any functions for converting - data between integer and floating point formats. It is - anticipated that some users will find it necessary to develop - float to integer or integer to float conversion functions at the - application level; users are invited to submit those functions - to be considered for inclusion in future versions of the - library. - -

A conversion path contains a source and destination datatype - and each path contains a hard conversion function - and/or a soft conversion function. The only difference - between hard and soft functions is the way in which the library - chooses which function applies: A hard function applies to a - specific conversion path while a soft function may apply to - multiple paths. When both hard and soft functions apply to a - conversion path, then the hard function is favored and when - multiple soft functions apply, the one defined last is favored. - -

A data conversion function is of type H5T_conv_t, - which is defined as follows: - -

typedef herr_t (*H5T_conv_t) (hid_t src_id, 
-                              hid_t dst_id, 
-                              H5T_cdata_t *cdata,
-                              hsize_t nelmts, 
-                              size_t buf_stride, 
-                              size_t bkg_stride, 
-                              void *buffer, 
-                              void *bkg_buffer,
-                              hid_t dset_xfer_plist);
- - -

The conversion function is called with - the source and destination datatypes (src_id and - dst_id), - the path-constant data struct (cdata), - the number of instances of the datatype to convert (nelmts), - a conversion buffer (buffer) which initially contains - an array of data having the source type and on return will - contain an array of data having the destination type, - a temporary or background buffer (bkg_buffer, - see description of H5T_BKG_YES below), - conversion and background buffer strides (buf_stride and - bkg_stride) that indicate what data is to be converted, and - a dataset transfer properties list (dset_xfer_plist). - -

buf_stride and bkg_stride are in bytes and - are related to the size of the datatype. - If every data element is to be converted, the parameter's value - is equal to the size of the datatype; - if every other data element is to be converted, the parameter's value - is equal to twice the size of the datatype; etc. - -
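-
-The fragment below is a minimal hedged sketch (not part of the original
-text) of how a conversion function might step through the conversion buffer
-using these strides; the conversion of each individual element is elided.
-The names buffer, nelmts, and buf_stride are the parameters from the
-H5T_conv_t typedef above:
-
-unsigned char *elmt = (unsigned char *)buffer;   /* first element to convert */
-hsize_t        i;
-
-for (i = 0; i < nelmts; i++) {
-    /* ... convert the element that starts at elmt in place ... */
-    elmt += buf_stride;      /* advance by the stride, in bytes */
-}
-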

dset_xfer_plist may contain properties that are passed - to the read and write calls. - This parameter is currently used only with variable-length data. - -

bkg_buffer and bkg_stride are used only with - compound datatypes. - -

The path-constant data struct, H5T_cdata_t, - is declared as follows: - -

typedef struct H5T_cdata_t {
-    H5T_cmd_t  command;    /* what should the conversion function do?  */
-    H5T_bkg_t  need_bkg;   /* is a background buffer needed?           */
-    hbool_t    recalc;     /* private data needs to be recomputed      */
-    void       *priv;      /* private data                             */
-} H5T_cdata_t;
- -

- -The command field of the cdata argument - determines what happens within the conversion function. Its - values can be: - -

-
H5T_CONV_INIT -
This command is sent to hard conversion functions when they're - registered, or to soft conversion functions when the library is - determining if a conversion can be used for a particular path. - The src_type and dst_type are the end-points - of the path being queried and cdata is all zero. The - library should examine the source and destination types and - return zero if the conversion is possible and negative - otherwise (hard conversions need not do this since they've - presumably been registered only on paths they support). If - the conversion is possible the library may allocate and - initialize private data and assign the pointer to the - priv field of cdata (or private data can - be initialized later). It should also initialize the - need_bkg field described below. The buf - and background pointers will be null pointers. - -

-
H5T_CONV_CONV -
This command indicates that data points should be converted. - The conversion function should initialize the - priv field of cdata if it wasn't - initialized during the H5T_CONV_INIT command and - then convert nelmts instances of the - src_type to the dst_type. The - buffer serves as both input and output. The - background buffer is supplied according to the value - of the need_bkg field of cdata (the - values are described below). - -

-
H5T_CONV_FREE -
The conversion function is about to be removed from some - path and the private data (the - cdata->priv pointer) should be freed and - set to null. All other pointer arguments are null, the - src_type and dst_type are invalid - (negative), and the nelmts argument is zero. - -

-
Others... -
Other commands might be implemented later and conversion - functions that don't support those commands should return a - negative value. -
- - -

Whether a background buffer is supplied to a conversion - function, and whether the background buffer is initialized, - depend on the value of cdata->need_bkg, - which the conversion function should have initialized during the - H5T_CONV_INIT command. It can have one of these values - (a short sketch follows the list): - -

-
H5T_BKG_NONE -
No background buffer will be supplied to the conversion - function. This is the default. - -

-
H5T_BKG_TEMP -
A background buffer will be supplied but it will not be - initialized. This is useful for those functions requiring some - extra buffer space as the buffer can probably be allocated - more efficiently by the library (the application can supply - the buffer as part of the dataset transfer property list). - -

-
H5T_BKG_YES -
An initialized background buffer is passed to the conversion - function. The buffer is initialized with the current values - of the destination for the data which is passed in through the - buffer argument. It can be used to "fill in between - the cracks". For instance, if the destination type is a - compound datatype and we are initializing only part of the - compound datatype from the source type then the background - buffer can be used to initialize the other part of the - destination. -
- -
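-
-As a hedged illustration (not part of the original text), a conversion
-function that needs no background buffer might record that fact inside its
-H5T_CONV_INIT case roughly as follows; the choice of H5T_BKG_NONE here is
-only an example:
-
-case H5T_CONV_INIT:
-    /* ... verify that this conversion path is supported ... */
-    cdata->need_bkg = H5T_BKG_NONE;   /* no background buffer required */
-    break;
-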

The recalc field of cdata is set when the - conversion path table changes. It can be used by conversion - functions that cache other conversion paths so they know when - their cache needs to be recomputed. - - -

Once a conversion function is written it can be registered and - unregistered with these functions: - -

-
herr_t H5Tregister(H5T_pers_t pers, const - char *name, hid_t src_type, hid_t - dest_type, H5T_conv_t func) -
Once a conversion function is written, the library must be - notified so it can be used. The function can be registered as - a hard (H5T_PERS_HARD) or soft - (H5T_PERS_SOFT) conversion depending on the value - of pers, displacing any previous conversions for all - applicable paths. The name is used only for - debugging but must be supplied. If pers is - H5T_PERS_SOFT then only the type classes of the - src_type and dst_type are used. For - instance, to register a general soft conversion function that - can be applied to any integer to integer conversion one could - say: H5Tregister(H5T_PERS_SOFT, "i2i", H5T_NATIVE_INT, - H5T_NATIVE_INT, convert_i2i). One special conversion - path called the "no-op" conversion path is always defined by - the library and used as the conversion function when no data - transformation is necessary. The application can redefine this - path by specifying a new hard conversion function with a - negative value for both the source and destination datatypes, - but the library might not call the function under certain - circumstances. - -

-
herr_t H5Tunregister (H5T_pers_t pers, const - char *name, hid_t src_type, hid_t - dest_type, H5T_conv_t func) -
Any conversion path or function that matches the criteria - specified by a call to this function is removed from the type - conversion table. All fields have the same interpretation as - for H5Tregister() with the added feature that any - (or all) may be wild cards. The - H5T_PERS_DONTCARE constant should be used to - indicate a wild card for the pers argument. The wild - card name is the null pointer or empty string, the - wild card for the src_type and dest_type - arguments is any negative value, and the wild card for the - func argument is the null pointer. The special no-op - conversion path is never removed by this function. - (See the sketch after this list.) -
- -
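-
-As a hedged sketch of the wild-card rules above (not part of the original
-text), the calls below first remove only the specific soft path registered
-in the H5Tregister() example, and then remove a hypothetical function
-conv_func from every path it serves:
-
-/* Remove just the int-to-int soft path registered as "i2i" above */
-H5Tunregister(H5T_PERS_SOFT, "i2i", H5T_NATIVE_INT, H5T_NATIVE_INT, convert_i2i);
-
-/* Remove conv_func everywhere: wild-card persistence, name, and types */
-H5Tunregister(H5T_PERS_DONTCARE, NULL, -1, -1, conv_func);
-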

-

- - - - - -

Example: A conversion - function

-

Here's an example application-level function that - converts Cray unsigned short to any other - big-endian unsigned integer without padding. A Cray - short is a big-endian value which has 32 - bits of precision in the high-order bits of a 64-bit - word. - -

-typedef struct {
-    size_t dst_size;
-    int direction;
-} cray_ushort2be_t;
-
-herr_t
-cray_ushort2be (hid_t src_id, hid_t dst_id,
-                H5T_cdata_t *cdata, hsize_t nelmts,
-                size_t buf_str, size_t bkg_str, void *buf,
-                void *background, hid_t plist)
-{
-    unsigned char *src = (unsigned char *)buf;
-    unsigned char *dst = src;
-    cray_ushort2be_t *priv = NULL;
-    size_t dst_size;
-    int direction;
-    hsize_t i;
-    size_t j;
-
-    switch (cdata->command) {
-    case H5T_CONV_INIT:
-        /*
-         * We are being queried to see if we handle this
-         * conversion.  We can handle conversion from
-         * Cray unsigned short to any other big-endian
-         * unsigned integer that doesn't have padding.
-         */
-        if (!H5Tequal (src_id, H5T_CRAY_USHORT) ||
-            H5T_ORDER_BE != H5Tget_order (dst_id) ||
-            H5T_SGN_NONE != H5Tget_sign (dst_id) ||
-            8*H5Tget_size (dst_id) != H5Tget_precision (dst_id)) {
-            return -1;
-        }
-
-        /*
-         * Initialize private data.  If the destination size
-         * is larger than the source size, then we must
-         * process the elements from right to left.
-         */
-        cdata->priv = priv = malloc (sizeof(cray_ushort2be_t));
-        priv->dst_size = H5Tget_size (dst_id);
-        if (priv->dst_size > 8) {
-            priv->direction = -1;
-        } else {
-            priv->direction = 1;
-        }
-        break;
-
-    case H5T_CONV_FREE:
-        /*
-         * Free private data.
-         */
-        free (cdata->priv);
-        cdata->priv = NULL;
-        break;
-
-    case H5T_CONV_CONV:
-        /*
-         * Convert each element, watching out for overlap of src
-         * with dst on the left-most element of the buffer.
-         */
-        priv = (cray_ushort2be_t *)(cdata->priv);
-        dst_size = priv->dst_size;
-        direction = priv->direction;
-        if (direction < 0) {
-            src += (nelmts - 1) * 8;
-            dst += (nelmts - 1) * dst_size;
-        }
-        for (i = 0; i < nelmts; i++) {
-            if (src == dst && dst_size < 4) {
-                for (j = 0; j < dst_size; j++) {
-                    dst[j] = src[j+4-dst_size];
-                }
-            } else {
-                for (j = 0; j < 4 && j < dst_size; j++) {
-                    dst[dst_size-(j+1)] = src[3-j];
-                }
-                for (j = 4; j < dst_size; j++) {
-                    dst[dst_size-(j+1)] = 0;
-                }
-            }
-            src += 8 * direction;
-            dst += dst_size * direction;
-        }
-        break;
-
-    default:
-        /*
-         * Unknown command.
-         */
-        return -1;
-    }
-    return 0;
-}
-	      
- -

The background argument is ignored since - it's generally not applicable to atomic datatypes. -

-
- -

-

- - - - - -

Example: Soft - Registration

-

The conversion function described in the previous - example applies to more than one conversion path. - Instead of enumerating all possible paths, we register - it as a soft function and allow it to decide which - paths it can handle. - -

-H5Tregister(H5T_PERS_SOFT, "cus2be",
-            H5T_NATIVE_INT, H5T_NATIVE_INT,
-            cray_ushort2be);
-	      
- -

This causes it to be consulted for any conversion - from an integer type to another integer type. The - name argument ("cus2be") is just a short identifier which will - be printed with the datatype conversion statistics. -

-
- - -

NOTE: The idea of a master soft list and being able to - query conversion functions for their abilities tries to overcome - problems we saw with AIO. Namely, that there was a dichotomy - between generic conversions and specific conversions that made - it very difficult to write a conversion function that operated - on, say, integers of any size and order as long as they don't - have zero padding. The AIO mechanism required such a function - to be explicitly registered (like - H5Tregister_hard()) for each and every possible - conversion path whether that conversion path was actually used - or not.

- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 2 August 2001 - - - - - diff --git a/doc/html/DatatypesEnum.html b/doc/html/DatatypesEnum.html deleted file mode 100644 index 607030a..0000000 --- a/doc/html/DatatypesEnum.html +++ /dev/null @@ -1,926 +0,0 @@ - - - - Enumeration Data Types in the Data Type Interface (H5T) - - - - - -
-
- - - -
- Introduction to HDF5 
- HDF5 Reference Manual 
- Other HDF5 documents and links 
- -
- And in this document, the - HDF5 User's Guide:     - Files   -
- Datasets   - Data Types   - Dataspaces   - Groups   - References   -
- Attributes   - Property Lists   - Error Handling   - Filters   - Caching   -
- Chunking   - Debugging   - Environment   - DDL   - Ragged Arrays   - -
-
-
- - -

The Data Type Interface (H5T) (continued)

- -

- (Return to Data Types Interface (H5T).) - - -

7. Enumeration Data Types

- -

7.1. Introduction

- -

An HDF enumeration data type is a 1:1 mapping between a set of - symbols and a set of integer values, and an order is imposed on - the symbols by their integer values. The symbols are passed - between the application and library as character strings and all - the values for a particular enumeration type are of the same - integer type, which is not necessarily a native type. - -

7.2. Creation

- -

Creation of an enumeration data type resembles creation of a - compound data type: first an empty enumeration type is created, - then members are added to the type, then the type is optionally - locked. - -

-
hid_t H5Tcreate(H5T_class_t type_class, - size_t size) -
This function creates a new empty enumeration data type based - on a native signed integer type. The first argument is the - constant H5T_ENUM and the second argument is the - size in bytes of the native integer on which the enumeration - type is based. If the architecture does not support a native - signed integer of the specified size then an error is - returned. - -
-/* Based on a native signed short */
-hid_t hdf_en_colors = H5Tcreate(H5T_ENUM, sizeof(short));
- - -
hid_t H5Tenum_create(hid_t base) -
This function creates a new empty enumeration data type based - on some integer data type base and is a - generalization of the H5Tcreate() function. This - function is useful when creating an enumeration type based on - some non-native integer data type, but it can be used for - native types as well. - -
-/* Based on a native unsigned short */
-hid_t hdf_en_colors_1 = H5Tenum_create(H5T_NATIVE_USHORT);
-
-/* Based on a MIPS 16-bit unsigned integer */
-hid_t hdf_en_colors_2 = H5Tenum_create(H5T_MIPS_UINT16);
-
-/* Based on a big-endian 16-bit unsigned integer */
-hid_t hdf_en_colors_3 = H5Tenum_create(H5T_STD_U16BE);
- - -
herr_t H5Tenum_insert(hid_t etype, const char - *symbol, void *value) -
Members are inserted into the enumeration data type - etype with this function. Each member has a symbolic - name symbol and some integer representation - value. The value argument must point to a value - of the same data type as specified when the enumeration type - was created. The order of member insertion is not important - but all symbol names and values must be unique within a - particular enumeration type. - -
-short val;
-H5Tenum_insert(hdf_en_colors, "RED",   (val=0,&val));
-H5Tenum_insert(hdf_en_colors, "GREEN", (val=1,&val));
-H5Tenum_insert(hdf_en_colors, "BLUE",  (val=2,&val));
-H5Tenum_insert(hdf_en_colors, "WHITE", (val=3,&val));
-H5Tenum_insert(hdf_en_colors, "BLACK", (val=4,&val));
- - -
herr_t H5Tlock(hid_t etype) -
This function locks a data type so it cannot be modified or - freed unless the entire HDF5 library is closed. Its use is - completely optional but using it on an application data type - makes that data type act like a predefined data type. - -
-H5Tlock(hdf_en_colors);
- -
- -

7.3. Integer Operations

- -

Because an enumeration data type is derived from an integer - data type, any operation which can be performed on integer data - types can also be performed on enumeration data types. This - includes the following (a short illustration follows the list): - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
H5Topen()H5Tcreate()H5Tcopy()H5Tclose()
H5Tequal()H5Tlock()H5Tcommit()H5Tcommitted()
H5Tget_class()H5Tget_size()H5Tget_order()H5Tget_pad()
H5Tget_precision()H5Tget_offset()H5Tget_sign()H5Tset_size()
H5Tset_order()H5Tset_precision()H5Tset_offset()H5Tset_pad()
H5Tset_sign()
-
- -
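-
-As a brief hedged illustration (not part of the original text), these
-integer queries can be applied directly to the hdf_en_colors type created
-earlier:
-
-size_t      size  = H5Tget_size(hdf_en_colors);    /* size of the base short  */
-H5T_order_t order = H5Tget_order(hdf_en_colors);   /* byte order of base type */
-H5T_class_t cls   = H5Tget_class(hdf_en_colors);   /* H5T_ENUM                */
-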

In addition, the new function H5Tget_super() will - be defined for all data types that are derived from existing - types (currently just enumeration types). - -

-
hid_t H5Tget_super(hid_t type) -
Return the data type from which type is - derived. When type is an enumeration data type then - the returned value will be an integer data type but not - necessarily a native type. One use of this function would be - to create a new enumeration type based on the same underlying - integer type and values but with possibly different symbols. - -
-hid_t itype = H5Tget_super(hdf_en_colors);
-hid_t hdf_fr_colors = H5Tenum_create(itype);
-H5Tclose(itype);
-
-short val;
-H5Tenum_insert(hdf_fr_colors, "rouge", (val=0,&val));
-H5Tenum_insert(hdf_fr_colors, "vert",  (val=1,&val));
-H5Tenum_insert(hdf_fr_colors, "bleu",  (val=2,&val));
-H5Tenum_insert(hdf_fr_colors, "blanc", (val=3,&val));
-H5Tenum_insert(hdf_fr_colors, "noir",  (val=4,&val));
-H5Tlock(hdf_fr_colors);
-
- -

7.4. Type Functions

- -

A small set of functions is available for querying properties - of an enumeration type. These functions are likely to be used - by browsers to display data type information. - -

-
int H5Tget_nmembers(hid_t etype) -
When given an enumeration data type etype this - function returns the number of members defined for that - type. This function is already implemented for compound data - types. - -

-
char *H5Tget_member_name(hid_t etype, unsigned - membno) -
Given an enumeration data type etype this function - returns the symbol name for the member indexed by - membno. Members are numbered from zero to - N-1 where N is the return value from - H5Tget_nmembers(). The members are stored in no - particular order. This function is already implemented for - compound data types. If an error occurs then the null pointer - is returned. The return value should be freed by calling - free(). - -

-
herr_t H5Tget_member_value(hid_t etype, unsigned - membno, void *value/*out*/) -
Given an enumeration data type etype this function - returns the value associated with the member indexed by - membno (as described for - H5Tget_member_name()). The value returned - is in the domain of the underlying integer - data type which is often a native integer type. The - application should ensure that the memory pointed to by - value is large enough to contain the result (the size - can be obtained by calling H5Tget_size() on - either the enumeration type or the underlying integer type - when the type is not known by the C compiler). - -
-int n = H5Tget_nmembers(hdf_en_colors);
-unsigned u;
-for (u=0; u<(unsigned)n; u++) {
-    char *symbol = H5Tget_member_name(hdf_en_colors, u);
-    short val;
-    H5Tget_member_value(hdf_en_colors, u, &val);
-    printf("#%u %20s = %d\n", u, symbol, val);
-    free(symbol);
-}
- -

- Output: -

-#0                BLACK = 4
-#1                 BLUE = 2
-#2                GREEN = 1
-#3                  RED = 0
-#4                WHITE = 3
-
- -

7.5. Data Functions

- -

In addition to querying about the enumeration type properties, - an application may want to make queries about enumerated - data. These functions perform efficient mappings between symbol - names and values. - -

-
herr_t H5Tenum_valueof(hid_t etype, const char - *symbol, void *value/*out*/) -
Given an enumeration data type etype this function - returns through value the bit pattern associated with - the symbol name symbol. The value argument - should point to memory which is large enough to hold the result, - which is returned as the underlying integer data type specified - when the enumeration type was created, often a native integer - type. - -

-
herr_t H5Tenum_nameof(hid_t etype, void - *value, char *symbol, size_t - size) -
This function translates a bit pattern pointed to by - value to a symbol name according to the mapping - defined in the enumeration data type etype and stores - at most size characters of that name (counting the - null terminator) to the symbol buffer. If the name is - longer than the result buffer then the result is not null - terminated and the function returns failure. If value - points to a bit pattern which is not in the domain of the - enumeration type then the first byte of the symbol - buffer is set to zero and the function fails. - -
-short data[1000] = {4, 2, 0, 0, 5, 1, ...};
-int i;
-char symbol[32];
-
-for (i=0; i<1000; i++) {
-    if (H5Tenum_nameof(hdf_en_colors, data+i, symbol,
-                       sizeof symbol) < 0) {
-        if (symbol[0]) {
-            strcpy(symbol+sizeof(symbol)-4, "...");
-        } else {
-            strcpy(symbol, "UNKNOWN");
-        }
-    }
-    printf("%d %s\n", data[i], symbol);
-}
-printf("}\n");
- -

- Output: -

-
-4 BLACK
-2 BLUE
-0 RED
-0 RED
-5 UNKNOWN
-1 GREEN
-...
-
- -

7.6. Conversion

- -

Enumerated data can be converted from one type to another - provided the destination enumeration type contains all the - symbols of the source enumeration type. The conversion operates - by matching up the symbol names of the source and destination - enumeration types to build a mapping from source value to - destination value. For instance, if we are translating from an - enumeration type that defines a sequence of integers as the - values for the colors to a type that defines a different bit for - each color then the mapping might look like this: - -

Enumeration Mapping - -

That is, a source value of 2 which corresponds to - BLUE would be mapped to 0x0004. The - following code snippet builds the second data type, then - converts a raw data array from one data type to another, and - then prints the result. - -

-/* Create a new enumeration type */
-short val;
-hid_t bits = H5Tcreate(H5T_ENUM, sizeof val);
-H5Tenum_insert(bits, "RED",   (val=0x0001,&val));
-H5Tenum_insert(bits, "GREEN", (val=0x0002,&val));
-H5Tenum_insert(bits, "BLUE",  (val=0x0004,&val));
-H5Tenum_insert(bits, "WHITE", (val=0x0008,&val));
-H5Tenum_insert(bits, "BLACK", (val=0x0010,&val));
-
-/* The data */
-short data[6] = {1, 4, 2, 0, 3, 5};
-
-/* Convert the data from one type to another */
-H5Tconvert(hdf_en_colors, bits, 6, data, NULL);
-
-/* Print the data */
-for (i=0; i<6; i++) {
-    printf("0x%04x\n", (unsigned)(data[i]));
-}
- -

- Output: -

-
-0x0002
-0x0010
-0x0004
-0x0001
-0x0008
-0xffff
- -

If the source data stream contains values which are not in the - domain of the conversion map then an overflow exception is - raised within the library, causing the application defined - overflow handler to be invoked (see - H5Tset_overflow()). If no overflow handler is - defined then all bits of the destination value will be set. - -
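-
-A minimal sketch of installing such a handler follows (not part of the
-original text; the H5T_overflow_t callback signature and its return
-convention are assumptions for this release and should be checked against
-H5Tpublic.h):
-
-/* Assumed signature: called with the source and destination type ids and
- * pointers to the offending source element and the destination element.
- * Returning a non-negative value is assumed to mean the handler filled in
- * the destination; a negative value falls back to the default behavior.
- * Requires <string.h> for memset(). */
-herr_t my_overflow(hid_t src_id, hid_t dst_id, void *src_buf, void *dst_buf)
-{
-    memset(dst_buf, 0, H5Tget_size(dst_id));  /* map out-of-domain values to 0 */
-    return 0;
-}
-
-H5Tset_overflow(my_overflow);
-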

The HDF library will not provide conversions between enumerated - data and integers although the application is free to do so - (this is a policy we apply to all classes of HDF data - types). However, since enumeration types are derived from - integer types it is permissible to treat enumerated data as - integers and perform integer conversions in that context. - -

7.7. Symbol Order

- -

Symbol order is determined by the integer values associated - with each symbol. When the integer data type is a native type, - testing the relative order of two symbols is an easy process: - simply compare the values of the symbols. If only the symbol - names are available then the values must first be determined by - calling H5Tenum_valueof(). - -

-short val1, val2;
-H5Tenum_valueof(hdf_en_colors, "WHITE", &val1);
-H5Tenum_valueof(hdf_en_colors, "BLACK", &val2);
-if (val1 < val2) ...
- -

When the underlying integer data type is not a native type then - the easiest way to compare symbols is to first create a similar - enumeration type that contains all the same symbols but has a - native integer type (HDF type conversion features can be used to - convert the non-native values to native values). Once we have a - native type we can compare symbol order as just described. If - foreign is some non-native enumeration type then a - native type can be created as follows: - -

-int n = H5Tget_nmembers(foreign);
-hid_t itype = H5Tget_super(foreign);
-void *val = malloc(n * MAX(H5Tget_size(itype), sizeof(int)));
-char **name = malloc(n * sizeof(char*));
-unsigned u;
-
-/* Get foreign type information */
-for (u=0; u<(unsigned)n; u++) {
-    name[u] = H5Tget_member_name(foreign, u);
-    H5Tget_member_value(foreign, u,
-                        (char*)val+u*H5Tget_size(foreign));
-}
-
-/* Convert integer values to new type */
-H5Tconvert(itype, H5T_NATIVE_INT, n, val, NULL);
-
-/* Build a native type */
-hid_t native = H5Tenum_create(H5T_NATIVE_INT);
-for (u=0; u<(unsigned)n; u++) {
-    H5Tenum_insert(native, name[u], (int*)val+u);
-    free(name[u]);
-}
-free(name);
-free(val);
- -

It is also possible to convert enumerated data to a new type - that has a different order defined for the symbols. For - instance, we can define a new type, reverse that - defines the same five colors but in the reverse order. - -

-short val;
-int i;
-char sym[8];
-short data[5] = {0, 1, 2, 3, 4};
-
-hid_t reverse = H5Tenum_create(H5T_NATIVE_SHORT);
-H5Tenum_insert(reverse, "BLACK", (val=0,&val));
-H5Tenum_insert(reverse, "WHITE", (val=1,&val));
-H5Tenum_insert(reverse, "BLUE",  (val=2,&val));
-H5Tenum_insert(reverse, "GREEN", (val=3,&val));
-H5Tenum_insert(reverse, "RED",   (val=4,&val));
-
-/* Print data */
-for (i=0; i<5; i++) {
-    H5Tenum_nameof(hdf_en_colors, data+i, sym, sizeof sym);
-    printf ("%d %s\n", data[i], sym);
-}
-
-puts("Converting...");
-H5Tconvert(hdf_en_colors, reverse, 5, data, NULL);
-
-/* Print data */
-for (i=0; i<5; i++) {
-    H5Tenum_nameof(reverse, data+i, sym, sizeof sym);
-    printf ("%d %s\n", data[i], sym);
-}
- -

- Output: -

-0 RED
-1 GREEN
-2 BLUE
-3 WHITE
-4 BLACK
-Converting...
-4 RED
-3 GREEN
-2 BLUE
-1 WHITE
-0 BLACK
- -

7.8. Equality

- -

The order that members are inserted into an enumeration type is - unimportant; the important part is the associations between the - symbol names and the values. Thus, two enumeration data types - will be considered equal if and only if both types have the same - symbol/value associations and both have equal underlying integer - data types. Type equality is tested with the - H5Tequal() function. - -
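-
-As a small hedged illustration (not part of the original text), a second
-type built with the same symbol/value pairs, inserted in a different
-order, compares equal to the hdf_en_colors type created earlier:
-
-short v;
-hid_t t2 = H5Tcreate(H5T_ENUM, sizeof(short));
-H5Tenum_insert(t2, "BLACK", (v=4,&v));
-H5Tenum_insert(t2, "RED",   (v=0,&v));
-H5Tenum_insert(t2, "GREEN", (v=1,&v));
-H5Tenum_insert(t2, "BLUE",  (v=2,&v));
-H5Tenum_insert(t2, "WHITE", (v=3,&v));
-if (H5Tequal(t2, hdf_en_colors) > 0) {
-    /* the two types may be used interchangeably */
-}
-H5Tclose(t2);
-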

7.9. Interacting with C's enum Type

- -

Although HDF enumeration data types are similar to C - enum data types, there are some important - differences: - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DifferenceMotivation/Implications
Symbols are unquoted in C but quoted in - HDF.This allows the application to manipulate - symbol names in ways that are not possible with C.
The C compiler automatically replaces all - symbols with their integer values but HDF requires - explicit calls to do the same.C resolves symbols at compile time while - HDF resolves symbols at run time.
The mapping from symbols to integers is - N:1 in C but 1:1 in HDF.HDF can translate from value to name - uniquely and large switch statements are - not necessary to print values in human-readable - format.
A symbol must appear in only one C - enum type but may appear in multiple HDF - enumeration types.The translation from symbol to value in HDF - requires the data type to be specified while in C the - data type is not necessary because it can be inferred - from the symbol.
The underlying integer value is always a - native integer in C but can be a foreign integer type in - HDF.This allows HDF to describe data that might - reside on a foreign architecture, such as data stored in - a file.
The sign and size of the underlying integer - data type is chosen automatically by the C compiler but - must be fully specified with HDF.Since HDF doesn't require finalization of a - data type, complete specification of the type must be - supplied before the type is used. Requiring that - information at the time of type creation was a design - decision to simplify the library.
-
- -

The examples below use the following C data types: - -

- - - - -
-
-/* English color names */
-typedef enum {
-    RED,
-    GREEN,
-    BLUE,
-    WHITE,
-    BLACK
-} c_en_colors;
-
-/* Spanish color names, reverse order */
-typedef enum {
-    NEGRO,
-    BLANCO,
-    AZUL,
-    VERDE,
-    ROJO
-} c_sp_colors;
-
-/* No enum definition for French names */
-	    
-
- -

Creating HDF Types from C Types

- -

An HDF enumeration data type can be created from a C - enum type simply by passing pointers to the C - enum values to H5Tenum_insert(). For - instance, to create HDF types for the c_en_colors - type shown above: - -

- - - - -
-
-
-c_en_colors val;
-hid_t hdf_en_colors = H5Tcreate(H5T_ENUM, sizeof(c_en_colors));
-H5Tenum_insert(hdf_en_colors, "RED",   (val=RED,  &val));
-H5Tenum_insert(hdf_en_colors, "GREEN", (val=GREEN,&val));
-H5Tenum_insert(hdf_en_colors, "BLUE",  (val=BLUE, &val));
-H5Tenum_insert(hdf_en_colors, "WHITE", (val=WHITE,&val));
-H5Tenum_insert(hdf_en_colors, "BLACK", (val=BLACK,&val));
-
- -

Name Changes between Applications

- -

Occasionally two applications wish to exchange data but they - use different names for the constants they exchange. For - instance, an English and a Spanish program may want to - communicate color names although they use different symbols in - the C enum definitions. The communication is still - possible although the applications must agree on common terms - for the colors. The following example shows the Spanish code to - read the values assuming that the applications have agreed that - the color information will be exchanged using English color - names: - -

- - - - -
-
-
-c_sp_colors val, data[1000];
-hid_t hdf_sp_colors = H5Tcreate(H5T_ENUM, sizeof(c_sp_colors));
-H5Tenum_insert(hdf_sp_colors, "RED",   (val=ROJO,   &val));
-H5Tenum_insert(hdf_sp_colors, "GREEN", (val=VERDE,  &val));
-H5Tenum_insert(hdf_sp_colors, "BLUE",  (val=AZUL,   &val));
-H5Tenum_insert(hdf_sp_colors, "WHITE", (val=BLANCO, &val));
-H5Tenum_insert(hdf_sp_colors, "BLACK", (val=NEGRO,  &val));
-
-H5Dread(dataset, hdf_sp_colors, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
-
- - -

Symbol Ordering across Applications

- -

Since symbol ordering is completely determined by the integer values - assigned to each symbol in the enum definition, - ordering of enum symbols cannot be preserved across - files like with HDF enumeration types. HDF can convert from one - application's integer values to the other's so a symbol in one - application's C enum gets mapped to the same symbol - in the other application's C enum, but the relative - order of the symbols is not preserved. - -

For example, an application may be defined to use the - definition of c_en_colors defined above where - WHITE is less than BLACK, but some - other application might define the colors in some other - order. If each application defines an HDF enumeration type based - on that application's C enum type then HDF will - modify the integer values as data is communicated from one - application to the other so that a RED value - in the first application is also a RED value in the - other application. - -

A case of this reordering of symbol names was also shown in the - previous code snippet (as well as a change of language), where - HDF changed the integer values so 0 (RED) in the - input file became 4 (ROJO) in the data - array. In the input file, WHITE was less than - BLACK; in the application the opposite is true. - -

In fact, the ability to change the order of symbols is often - convenient when the enumeration type is used only to group - related symbols that don't have any well defined order - relationship. - -

Internationalization

- -

The HDF enumeration type conversion features can also be used - to provide internationalization of debugging output. A program - written with the c_en_colors data type could define - a separate HDF data type for languages such as English, Spanish, - and French and cast the enumerated value to one of these HDF - types to print the result. - -

- - - - -
-
-
-c_en_colors val, *data=...;
-
-hid_t hdf_sp_colors = H5Tcreate(H5T_ENUM, sizeof val);
-H5Tenum_insert(hdf_sp_colors, "ROJO",   (val=RED,   &val));
-H5Tenum_insert(hdf_sp_colors, "VERDE",  (val=GREEN, &val));
-H5Tenum_insert(hdf_sp_colors, "AZUL",   (val=BLUE,  &val));
-H5Tenum_insert(hdf_sp_colors, "BLANCO", (val=WHITE, &val));
-H5Tenum_insert(hdf_sp_colors, "NEGRO",  (val=BLACK, &val));
-
-hid_t hdf_fr_colors = H5Tcreate(H5T_ENUM, sizeof val);
-H5Tenum_insert(hdf_fr_colors, "ROUGE", (val=RED,   &val));
-H5Tenum_insert(hdf_fr_colors, "VERT",  (val=GREEN, &val));
-H5Tenum_insert(hdf_fr_colors, "BLEU",  (val=BLUE,  &val));
-H5Tenum_insert(hdf_fr_colors, "BLANC", (val=WHITE, &val));
-H5Tenum_insert(hdf_fr_colors, "NOIR",  (val=BLACK, &val));
-
-void
-nameof(lang_t language, c_en_colors val, char *name, size_t size)
-{
-    switch (language) {
-    case ENGLISH:
-        H5Tenum_nameof(hdf_en_colors, &val, name, size);
-        break;
-    case SPANISH:
-        H5Tenum_nameof(hdf_sp_colors, &val, name, size);
-        break;
-    case FRENCH:
-        H5Tenum_nameof(hdf_fr_colors, &val, name, size);
-        break;
-    }
-}
-
- -

7.10. Goals That Have Been Met

- -

The main goal of enumeration types is to provide communication - of enumerated data using symbolic equivalence. That is, a - symbol written to a dataset by one application should be read as - the same symbol by some other application. - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Architecture IndependenceTwo applications shall be able to exchange - enumerated data even when the underlying integer values - have different storage formats. HDF accomplishes this for - enumeration types by building them upon integer types.
Preservation of Order RelationshipThe relative order of symbols shall be - preserved between two applications that use equivalent - enumeration data types. Unlike numeric values that have - an implicit ordering, enumerated data has an explicit - order defined by the enumeration data type and HDF - records this order in the file.
Order IndependenceAn application shall be able to change the - relative ordering of the symbols in an enumeration data - type. This is accomplished by defining a new type with - different integer values and converting data from one type - to the other.
SubsetsAn application shall be able to read - enumerated data from an archived dataset even after the - application has defined additional members for the - enumeration type. An application shall be able to write - to a dataset when the dataset contains a superset of the - members defined by the application. Similar rules apply - for in-core conversions between enumerated data - types.
TargetableAn application shall be able to target a - particular architecture or application when storing - enumerated data. This is accomplished by allowing - non-native underlying integer types and converting the - native data to non-native data.
Efficient Data TransferAn application that defines a file dataset - that corresponds to some native C enumerated data array - shall be able to read and write to that dataset directly - using only Posix read and write functions. HDF already - optimizes this case for integers, so the same optimization - will apply to enumerated data. -
Efficient StorageEnumerated data shall be stored in a manner - which is space efficient. HDF stores the enumerated data - as integers and allows the application to choose the size - and format of those integers.
- - -

- (Return to Data Types Interface (H5T).) - - -


-
- - - -
- Introduction to HDF5 
- HDF5 Reference Manual 
- Other HDF5 documents and links 
- -
- And in this document, the - HDF5 User's Guide:     - Files   -
- Datasets   - Data Types   - Dataspaces   - Groups   - References   -
- Attributes   - Property Lists   - Error Handling   - Filters   - Caching   -
- Chunking   - Debugging   - Environment   - DDL   - Ragged Arrays   - -
-
- - -
-
-HDF Help Desk -
- - -Last modified: 30 April 1999 -Footer modified: 3 July 2002 - - -
-This file is no longer used; the material has been integrated into Datatypes.html. - - - - diff --git a/doc/html/Debugging.html b/doc/html/Debugging.html deleted file mode 100644 index d04cf27..0000000 --- a/doc/html/Debugging.html +++ /dev/null @@ -1,516 +0,0 @@ - - - - Debugging HDF5 Applications - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

Debugging HDF5 Applications

- -

Introduction

- -

The HDF5 library contains a number of debugging features to - make programmers' lives easier including the ability to print - detailed error messages, check invariant conditions, display - timings and other statistics, and trace API function calls and - return values. - -

-
Error Messages -
Error messages are normally displayed automatically on the - standard error stream and include a stack trace of the library - including file names, line numbers, and function names. The - application has complete control over how error messages are - displayed and can disable the display on a permanent or - temporary basis. Refer to the documentation for the H5E error - handling package. - -

-
Invariant Conditions -
Unless NDEBUG is defined at compile time, the - library will include code to verify that invariant conditions - have the expected values. When a problem is detected the - library will display the file and line number within the - library and the invariant condition that failed. A core dump - may be generated for post mortem debugging. The code to - perform these checks can be included on a per-package basis. - -

-
Timings and Statistics -
The library can be configured to accumulate certain - statistics about things like cache performance, datatype - conversion, data space conversion, and data filters. The code - is included on a per-package basis and enabled at runtime by - an environment variable. - -

-
API Tracing -
All API calls made by an application can be displayed and - include formal argument names and actual values and the - function return value. This code is also conditionally - included at compile time and enabled at runtime. -
- -

The statistics and tracing can be displayed on any output - stream (including streams opened by the shell) with output from - different packages even going to different streams. - -

Error Messages

- -

By default any API function that fails will print an error - stack to the standard error stream. - -

-

- - - - -
-

-
-HDF5-DIAG: Error detected in thread 0.  Back trace follows.
-  #000: H5F.c line 1245 in H5Fopen(): unable to open file
-    major(04): File interface
-    minor(10): Unable to open file
-  #001: H5F.c line 846 in H5F_open(): file does not exist
-    major(04): File interface
-    minor(10): Unable to open file
-	      
-
-
- -

The error handling package (H5E) is described - elsewhere. - -

Invariant Conditions

- -

To include checks for invariant conditions the library should - be configured with --disable-production, the - default for versions before 1.2. The library designers have made - every attempt to handle error conditions gracefully but an - invariant condition assertion may fail in certain cases. The - output from a failure usually looks something like this: - -

-

- - - - -
-

-
-Assertion failed: H5.c:123: i<NELMTS(H5_debug_g)
-IOT Trap, core dumped.
-	      
-
-
- -

Timings and Statistics

- -

Code to accumulate statistics is included at compile time by - using the --enable-debug configure switch. The - switch can be followed by an equal sign and a comma-separated - list of package names or else a default list is used. - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameDefaultDescription
aNoAttributes
acYesMeta data cache
bYesB-Trees
dYesDatasets
eYesError handling
fYesFiles
gYesGroups
hgYesGlobal heap
hlNoLocal heaps
iYesInterface abstraction
mfNoFile memory management
mmYesLibrary memory management
oNoObject headers and messages
pYesProperty lists
sYesData spaces
tYesDatatypes
vYesVectors
zYesRaw data filters
-
- -

In addition to including the code at compile time the - application must enable each package at runtime. This is done - by listing the package names in the HDF5_DEBUG - environment variable. That variable may also contain file - descriptor numbers (the default is `2') which control the output - for all following packages up to the next file number. The - word all refers to all packages. Any word may be - preceded by a minus sign to turn debugging off for the package. - -

-

- - - - - - - - - - - - - - -
Sample debug specifications
allThis causes debugging output from all packages to be - sent to the standard error stream.
all -t -sDebugging output for all packages except datatypes - and data spaces will appear on the standard error - stream.
-all ac 255 t,sThis disables all debugging even if the default was to - debug something, then output from the meta data cache is - sent to the standard error stream and output from data - types and spaces is sent to file descriptor 255 which - should be redirected by the shell.
-
- -

The components of the HDF5_DEBUG value may be - separated by any non-lowercase letter. - -

API Tracing

- -

The HDF5 library can trace API calls by printing the - function name, the argument names and their values, and the - return value. Some people like to see lots of output during - program execution instead of using a good symbolic debugger, and - this feature is intended for their consumption. For example, - the output from h5ls foo after turning on tracing, - includes: - -

-

- - - - -
-
-
-H5Tcopy(type=184549388) = 184549419 (type);
-H5Tcopy(type=184549392) = 184549424 (type);
-H5Tlock(type=184549424) = SUCCEED;
-H5Tcopy(type=184549393) = 184549425 (type);
-H5Tlock(type=184549425) = SUCCEED;
-H5Fopen(filename="foo", flags=0, access=H5P_DEFAULT) = FAIL;
-HDF5-DIAG: Error detected in thread 0.  Back trace follows.
-  #000: H5F.c line 1245 in H5Fopen(): unable to open file
-    major(04): File interface
-    minor(10): Unable to open file
-  #001: H5F.c line 846 in H5F_open(): file does not exist
-    major(04): File interface
-    minor(10): Unable to open file
-	      
-
-
- -

The code that performs the tracing must be included in the - library by specifying the --enable-trace - configuration switch (the default for versions before 1.2). Then - the word trace must appear in the value of the - HDF5_DEBUG variable. The output will appear on the - file descriptor listed immediately before the word trace, - or on file descriptor two (standard error) by default.

-

- - - - - - - -
To display the trace on the standard error stream: -
-$ env HDF5_DEBUG=trace a.out
-	      
-
To send the trace to a file: -
-$ env HDF5_DEBUG="55 trace" a.out 55>trace-output
-	      
-
-
- -

Performance

- -

If the library was not configured for tracing then there is no - unnecessary overhead since all tracing code is excluded. - However, if tracing is enabled but not used there is a small - penalty. First, code size is larger because of extra - statically-declared character strings used to store argument - types and names and an extra automatic pointer variable in each - function. Also, execution is slower because each function sets - and tests a local variable and each API function calls the - H5_trace() function.

If tracing is enabled and turned on then the penalties from the - previous paragraph apply plus the time required to format each - line of tracing information. There is also an extra call to - H5_trace() for each API function to print the return value. - -

Safety

- -

The tracing mechanism is invoked for each API function before - arguments are checked for validity. If bad arguments are passed - to an API function it could result in a segmentation fault. - However, the tracing output is line-buffered so all previous - output will appear. - -

Completeness

- -

There are two API functions that don't participate in - tracing. They are H5Eprint() and - H5Eprint_cb() because their participation would - mess up output during automatic error reporting. - -

On the other hand, a number of API functions are called during - library initialization and they print tracing information. - -

Implementation

- -

For those interested in the implementation, here is a - description. Each API function should have a call to one of the - H5TRACE() macros immediately after the - FUNC_ENTER() macro. The first argument is the - return type encoded as a string. The second argument is the - types of all the function arguments encoded as a string. The - remaining arguments are the function arguments. This macro was - designed to be as terse and unobtrusive as possible.

In order to keep the H5TRACE() calls synchronized - with the source code we've written a perl script which gets - called automatically just before Makefile dependencies are - calculated for the file. However, this only works when one is - using GNU make. To reinstrument the tracing explicitly, invoke - the trace program from the hdf5 bin directory with - the names of the source files that need to be updated. If any - file needs to be modified then a backup is created by appending - a tilde to the file name. - -

-

- - - - - -
Explicit Instrumentation
-
-
-$ ../bin/trace *.c
-H5E.c: in function `H5Ewalk_cb':
-H5E.c:336: warning: trace info was not inserted
-	      
-
-
- -

Note: The warning message is the result of a comment of the - form /*NO TRACE*/ somewhere in the function - body. Tracing information will not be updated or inserted if - such a comment exists. - -

Error messages have the same format as a compiler so that they - can be parsed from program development environments like - Emacs. Any function which generates an error will not be - modified.

- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 13 December 1999 - - - - - diff --git a/doc/html/EnumMap.gif b/doc/html/EnumMap.gif deleted file mode 100644 index d06f06a..0000000 Binary files a/doc/html/EnumMap.gif and /dev/null differ diff --git a/doc/html/Environment.html b/doc/html/Environment.html deleted file mode 100644 index a00998b..0000000 --- a/doc/html/Environment.html +++ /dev/null @@ -1,166 +0,0 @@ - - - - -Environment Variables and Configuration Parameters - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

HDF5 Library Environment Variables and Configuration Parameters

- -

1. Environment Variables

- -The HDF5 library uses UNIX environment variables to control -or adjust certain library features at runtime. The variables and -their defined effects are as follows: - - -
-
HDF5_DEBUG -
Defines a list of debugging switches documented in the - Debugging section of the - HDF5 User's Guide. - -
HDF5_NOCLEANUP -
When set to a non-empty value, the programs in the test - directory do not remove temporary HDF5 data files. - The default is for each test to remove the files before exit. - -
HDF5_DISABLE_VERSION_CHECK -
When set to 1, HDF5 will not abort when the version - of the HDF5 headers doesn't match the version of the HDF5 library. - -
HDF5_MPI_OPT_TYPES -   (for parallel beta version only) -
When set to 1, PHDF5 will use the MPI optimized - code to perform parallel read/write accesses to datasets. - Currently, this optimization fails when accessing extendable - datasets. The default is not to use the optimized code. - -
-
- -

2. Configuration Parameters

- -The HDF5 configuration script accepts a list of parameters to control -configuration features when creating the Makefiles for the library. -The command -
-     configure --help -
-will display the current list of parameters and their effects. -

- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 13 December 1999 - - - - diff --git a/doc/html/Errors.html b/doc/html/Errors.html deleted file mode 100644 index 29a00ba..0000000 --- a/doc/html/Errors.html +++ /dev/null @@ -1,386 +0,0 @@ - - - - Error Handling Interface (H5E) - - - - - - - - - - -

The Error Handling Interface (H5E)

- -

1. Introduction

- -

When an error occurs deep within the HDF5 library a record is - pushed onto an error stack and that function returns a failure - indication. Its caller detects the failure, pushes another - record onto the stack, and returns a failure indication. This - continues until the application-called API function returns a - failure indication (a negative integer or null pointer). The - next API function which is called (with a few exceptions) resets - the stack. - -

2. Error Handling Operations

- -

In normal circumstances, an error causes the stack to be - printed on the standard error stream. The first item, number - "#000" is produced by the API function itself and is usually - sufficient to indicate to the application programmer what went - wrong. - -

-

- - - - - -

Example: An Error Message

-

If an application calls H5Tclose on a - predefined datatype then the following message is - printed on the standard error stream. This is a - simple error that has only one component, the API - function; other errors may have many components. - -

-HDF5-DIAG: Error detected in thread 0.  Back trace follows.
-  #000: H5T.c line 462 in H5Tclose(): predefined datatype
-    major(01): Function argument
-    minor(05): Bad value
-	      
-
-
- -

The error stack can also be printed and manipulated by these functions, but if an application wishes to make explicit calls to H5Eprint() then the automatic printing should be turned off to prevent error messages from being displayed twice (see H5Eset_auto() below).

-
herr_t H5Eprint (FILE *stream) -
The error stack is printed on the specified stream. Even if - the error stack is empty a one-line message will be printed: - HDF5-DIAG: Error detected in thread 0. - -

-
herr_t H5Eclear (void) -
The error stack can be explicitly cleared by calling this - function. The stack is also cleared whenever an API function - is called, with certain exceptions (for instance, - H5Eprint()). -
- -

Sometimes an application will call a function for the sake of - its return value, fully expecting the function to fail. Under - these conditions, it would be misleading if an error message - were automatically printed. Automatic printing of messages is - controlled by the H5Eset_auto() function: - -

-
herr_t H5Eset_auto (herr_t(*func)(void*), - void *client_data) -
If func is not a null pointer, then the function to - which it points will be called automatically when an API - function is about to return an indication of failure. The - function is called with a single argument, the - client_data pointer. When the library is first - initialized the auto printing function is set to - H5Eprint() (cast appropriately) and - client_data is the standard error stream pointer, - stderr. - -

-
herr_t H5Eget_auto (herr_t(**func)(void*), - void **client_data) -
This function returns the current automatic error traversal - settings through the func and client_data - arguments. Either (or both) arguments may be null pointers in - which case the corresponding information is not returned. -
- -

-

- - - - - -

Example: Error Control

-

An application can temporarily turn off error - messages while "probing" a function. - -

-/* Save old error handler */
-herr_t (*old_func)(void*);
-void *old_client_data;
-H5Eget_auto(&old_func, &old_client_data);
-
-/* Turn off error handling */
-H5Eset_auto(NULL, NULL);
-
-/* Probe. Likely to fail, but that's okay */
-status = H5Fopen (......);
-
-/* Restore previous error handler */
-H5Eset_auto(old_func, old_client_data);
-	      
- -

Or automatic printing can be disabled altogether and - error messages can be explicitly printed. - -

-/* Turn off error handling permanently */
-H5Eset_auto (NULL, NULL);
-
-/* If failure, print error message */
-if (H5Fopen (....)<0) {
-    H5Eprint (stderr);
-    exit (1);
-}
-	      
-
-
- -

The application is allowed to define an automatic error - traversal function other than the default - H5Eprint(). For instance, one could define a - function that prints a simple, one-line error message to the - standard error stream and then exits. - -

-

- - - - - -

Example: Simple Messages

-

The application defines a function to print a simple - error message to the standard error stream. - -

-herr_t
-my_hdf5_error_handler (void *unused)
-{
-   fprintf (stderr, "An HDF5 error was detected. Bye.\n");
-   exit (1);
-}
-	      
- -

The function is installed as the error handler by - saying - -

-H5Eset_auto (my_hdf5_error_handler, NULL);
-	      
-
-
- -

The H5Eprint() function is actually just a wrapper - around the more complex H5Ewalk() function which - traverses an error stack and calls a user-defined function for - each member of the stack. - -

-
herr_t H5Ewalk (H5E_direction_t direction, - H5E_walk_t func, void *client_data) -
The error stack is traversed and func is called for - each member of the stack. Its arguments are an integer - sequence number beginning at zero (regardless of - direction), a pointer to an error description record, - and the client_data pointer. If direction - is H5E_WALK_UPWARD then traversal begins at the - inner-most function that detected the error and concludes with - the API function. The opposite order is - H5E_WALK_DOWNWARD. - -

-
typedef herr_t (*H5E_walk_t)(int n, - H5E_error_t *eptr, void - *client_data) -
An error stack traversal callback function takes three - arguments: n is a sequence number beginning at zero - for each traversal, eptr is a pointer to an error - stack member, and client_data is the same pointer - passed to H5Ewalk(). - -

-
typedef struct {
-    H5E_major_t maj_num;
-    H5E_minor_t min_num;
-    const char  *func_name;
-    const char  *file_name;
-    unsigned    line;
-    const char  *desc;
-} H5E_error_t;
-
The maj_num and min_num are major - and minor error numbers, func_name is the name of - the function where the error was detected, - file_name and line locate the error - within the HDF5 library source code, and desc - points to a description of the error. - -

-
const char *H5Eget_major (H5E_major_t num) -
const char *H5Eget_minor (H5E_minor_t num) -
These functions take a major or minor error number and return a constant string which describes the error. If num is out of range then a string like "Invalid major error number" is returned.
- -

-

- - - - - -

Example: H5Ewalk_cb

-

This is the implementation of the default error stack - traversal callback. - -

-herr_t
-H5Ewalk_cb(int n, H5E_error_t *err_desc, void *client_data)
-{
-    FILE		*stream = (FILE *)client_data;
-    const char		*maj_str = NULL;
-    const char		*min_str = NULL;
-    const int		indent = 2;
-
-    /* Check arguments */
-    assert (err_desc);
-    if (!client_data) client_data = stderr;
-
-    /* Get descriptions for the major and minor error numbers */
-    maj_str = H5Eget_major (err_desc->maj_num);
-    min_str = H5Eget_minor (err_desc->min_num);
-
-    /* Print error message */
-    fprintf (stream, "%*s#%03d: %s line %u in %s(): %s\n",
-	     indent, "", n, err_desc->file_name, err_desc->line,
-	     err_desc->func_name, err_desc->desc);
-    fprintf (stream, "%*smajor(%02d): %s\n",
-	     indent*2, "", err_desc->maj_num, maj_str);
-    fprintf (stream, "%*sminor(%02d): %s\n",
-	     indent*2, "", err_desc->min_num, min_str);
-
-    return 0;
-}
-	      
-
-
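A short sketch of invoking the walk explicitly with this callback, using the H5Ewalk() signature described above (later library releases changed this interface):

    /* Walk the error stack from the API function (entry #000) inward,
     * printing each record to stderr with the callback shown above. */
    if (H5Ewalk(H5E_WALK_DOWNWARD, H5Ewalk_cb, stderr) < 0)
        fprintf(stderr, "unable to walk the HDF5 error stack\n");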
- -Last modified: 13 December 1999 - - - - diff --git a/doc/html/ExternalFiles.html b/doc/html/ExternalFiles.html deleted file mode 100644 index 0213ea8..0000000 --- a/doc/html/ExternalFiles.html +++ /dev/null @@ -1,279 +0,0 @@ - - - - External Files in HDF5 - - - -

External Files in HDF5

- -

Overview of Layers

- -

This table shows some of the layers of HDF5. Each layer calls - functions at the same or lower layers and never functions at - higher layers. An object identifier (OID) takes various forms - at the various layers: at layer 0 an OID is an absolute physical - file address; at layers 1 and 2 it's an absolute virtual file - address. At layers 3 through 6 it's a relative address, and at - layers 7 and above it's an object handle. - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Layer-7: Groups, Datasets
Layer-6: Indirect Storage, Symbol Tables
Layer-5: B-trees, Object Hdrs, Heaps
Layer-4: Caching
Layer-3: H5F chunk I/O
Layer-2: H5F low
Layer-1: File Family, Split Meta/Raw
Layer-0: Section-2 I/O, Standard I/O, Malloc/Free
-
- -

Single Address Space

- -

The simplest form of hdf5 file is a single file containing only - hdf5 data. The file begins with the super block, which is - followed until the end of the file by hdf5 data. The next most - complicated file allows non-hdf5 data (user defined data or - internal wrappers) to appear before the super block and after the - end of the hdf5 data. The hdf5 data is treated as a single - linear address space in both cases. - -

The next level of complexity comes when non-hdf5 data is - interspersed with the hdf5 data. We handle that by including - the non-hdf5 interspersed data in the hdf5 address space and - simply not referencing it (eventually we might add those - addresses to a "do-not-disturb" list using the same mechanism as - the hdf5 free list, but it's not absolutely necessary). This is - implemented except for the "do-not-disturb" list. - -

The most complicated single address space hdf5 file is when we allow the address space to be split among multiple physical files. For instance, a >2GB file can be split into smaller chunks and transferred to a 32 bit machine, then accessed as a single logical hdf5 file. The library already supports >32 bit addresses, so at layer 1 we split a 64-bit address into a 32-bit file number and a 32-bit offset (the 64 and 32 are arbitrary). The rest of the library still operates with a linear address space.

Another variation might be a family of two files where all the - meta data is stored in one file and all the raw data is stored - in another file to allow the HDF5 wrapper to be easily replaced - with some other wrapper. - -

The H5Fcreate and H5Fopen functions - would need to be modified to pass file-type info down to layer 2 - so the correct drivers can be called and parameters passed to - the drivers to initialize them. - -

Implementation

- -

I've implemented fixed-size family members. The entire hdf5 file is partitioned into members where each member is the same size. The family scheme is used if one passes a name to H5F_open (which is called by H5Fopen() and H5Fcreate()) that contains a printf(3c)-style integer format specifier. Currently, the default low-level file driver is used for all family members (H5F_LOW_DFLT, usually set to be Section 2 I/O or Section 3 stdio), but we'll probably eventually want to pass that as a parameter of the file access property list, which hasn't been implemented yet. When creating a family, a default family member size is used (defined at the top of H5Ffamily.c, currently 64MB) but that also should be settable in the file access property list. When opening an existing family, the size of the first member is used to determine the member size (flushing/closing a family ensures that the first member is the correct size) but the other family members don't have to be that large (the local address space, however, is logically the same size for all members).

I haven't implemented a split meta/raw family yet but am rather curious to see how it would perform. I was planning to use the `.h5' extension for the meta data file and `.raw' for the raw data file. The high-order bit in the address would determine whether the address refers to meta data or raw data. If the user passes a name that ends with `.raw' to H5F_open then we'll choose the split family and use the default low level driver for each of the two family members. Eventually we'll want to pass these kinds of things through the file access property list instead of relying on naming convention.

External Raw Data

- -

We also need the ability to point to raw data that isn't in the - HDF5 linear address space. For instance, a dataset might be - striped across several raw data files. - -

Fortunately, the only two packages that need to be aware of - this are the packages for reading/writing contiguous raw data - and discontiguous raw data. Since contiguous raw data is a - special case, I'll discuss how to implement external raw data in - the discontiguous case. - -

Discontiguous data is stored as a B-tree whose keys are the - chunk indices and whose leaf nodes point to the raw data by - storing a file address. So what we need is some way to name the - external files, and a way to efficiently store the external file - name for each chunk. - -

I propose adding to the object header an External File - List message that is a 1-origin array of file names. - Then, in the B-tree, each key has an index into the External - File List (or zero for the HDF5 file) for the file where the - chunk can be found. The external file index is only used at - the leaf nodes to get to the raw data (the entire B-tree is in - the HDF5 file) but because of the way keys are copied among - the B-tree nodes, it's much easier to store the index with - every key. - -

Multiple HDF5 Files

- -

One might also want to combine two or more HDF5 files in a - manner similar to mounting file systems in Unix. That is, the - group structure and meta data from one file appear as though - they exist in the first file. One opens File-A, and then - mounts File-B at some point in File-A, the mount - point, so that traversing into the mount point actually - causes one to enter the root object of File-B. File-A and - File-B are each complete HDF5 files and can be accessed - individually without mounting them. - -

We need a couple additional pieces of machinery to make this - work. First, an haddr_t type (a file address) doesn't contain - any info about which HDF5 file's address space the address - belongs to. But since haddr_t is an opaque type except at - layers 2 and below, it should be quite easy to add a pointer to - the HDF5 file. This would also remove the H5F_t argument from - most of the low-level functions since it would be part of the - OID. - -

The other thing we need is a table of mount points and some - functions that understand them. We would add the following - table to each H5F_t struct: - -

-struct H5F_mount_t {
-   H5F_t *parent;         /* Parent HDF5 file if any */
-   struct {
-      H5F_t *f;           /* File which is mounted */
-      haddr_t where;      /* Address of mount point */
-   } *mount;              /* Array sorted by mount point */
-   intn nmounts;          /* Number of mounted files */
-   intn alloc;            /* Size of mount table */
-}
-    
- -

The H5Fmount function takes the ID of an open - file or group, the name of a to-be-mounted file, the name of the mount - point, and a file access property list (like H5Fopen). - It opens the new file and adds a record to the parent's mount - table. The H5Funmount function takes the parent - file or group ID and the name of the mount point and disassociates - the mounted file from the mount point. It does not close the - mounted file. The H5Fclose - function closes/unmounts files recursively. - -

The H5G_iname function which translates a name to - a file address (haddr_t) looks at the mount table - at each step in the translation and switches files where - appropriate. All name-to-address translations occur through - this function. - -
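For reference, the mounting interface that HDF5 eventually shipped differs slightly from the proposal above: the child file is opened first and its identifier, rather than its name, is passed to H5Fmount(). A minimal sketch under that assumption (file names and the mount-point group are arbitrary illustrations):

    hid_t parent, child;

    parent = H5Fopen("file_a.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    child  = H5Fopen("file_b.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

    /* Make the root group of file_b.h5 appear at /mnt in file_a.h5;
     * /mnt must already exist as a group in file_a.h5. */
    H5Fmount(parent, "/mnt", child, H5P_DEFAULT);
    /* ... traverse /mnt/... as though it were part of file_a.h5 ... */
    H5Funmount(parent, "/mnt");

    H5Fclose(child);
    H5Fclose(parent);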

How Long?

- -

I'm expecting to be able to implement the two new flavors of - single linear address space in about two days. It took two hours - to implement the malloc/free file driver at level zero and I - don't expect this to be much more work. - -

I'm expecting three days to implement the external raw data for discontiguous arrays. Adding the file index to the B-tree is quite trivial; adding the external file list message shouldn't be too hard since the object header message class from which this message derives is fully implemented; and changing H5F_istore_read should be trivial. Most of the time will be spent designing a way to cache Unix file descriptors efficiently since the total number of open files allowed per process could be much smaller than the total number of HDF5 files and external raw data files.

I'm expecting four days to implement being able to mount one - HDF5 file on another. I was originally planning a lot more, but - making haddr_t opaque turned out to be much easier - than I planned (I did it last Fri). Most of the work will - probably be removing the redundant H5F_t arguments for lots of - functions. - -

Conclusion

- -

The external raw data could be implemented as a single linear address space, but doing so would require one to allocate large enough file addresses throughout the file (>32bits) before the file was created. It would make mixing an HDF5 file family with external raw data, or an external HDF5 wrapper around an HDF4 file, a more difficult process. So I consider the implementation of external raw data files as a single HDF5 linear address space a kludge.

The ability to mount one HDF5 file on another might not be a - very important feature especially since each HDF5 file must be a - complete file by itself. It's not possible to stripe an array - over multiple HDF5 files because the B-tree wouldn't be complete - in any one file, so the only choice is to stripe the array - across multiple raw data files and store the B-tree in the HDF5 - file. On the other hand, it might be useful if one file - contains some public data which can be mounted by other files - (e.g., a mesh topology shared among collaborators and mounted by - files that contain other fields defined on the mesh). Of course - the applications can open the two files separately, but it might - be more portable if we support it in the library. - -

So we're looking at about two weeks to implement all three - versions. I didn't get a chance to do any of them in AIO - although we had long-term plans for the first two with a - possibility of the third. They'll be much easier to implement in - HDF5 than AIO since I've been keeping these in mind from the - start. - -


-
Robb Matzke
- - -Last modified: Tue Sep 8 14:43:32 EDT 1998 - - - diff --git a/doc/html/FF-IH_FileGroup.gif b/doc/html/FF-IH_FileGroup.gif deleted file mode 100644 index b0d76f5..0000000 Binary files a/doc/html/FF-IH_FileGroup.gif and /dev/null differ diff --git a/doc/html/FF-IH_FileObject.gif b/doc/html/FF-IH_FileObject.gif deleted file mode 100644 index 8eba623..0000000 Binary files a/doc/html/FF-IH_FileObject.gif and /dev/null differ diff --git a/doc/html/Files.html b/doc/html/Files.html deleted file mode 100644 index d490436..0000000 --- a/doc/html/Files.html +++ /dev/null @@ -1,607 +0,0 @@ - - - - File Interface (H5F) - - - - - - - - - - -

The File Interface (H5F)

- -

1. Introduction

- -

HDF5 files are composed of a super block describing information - required to portably access files on multiple platforms, followed - by information about the groups in a file and the datasets in the - file. The super block contains information about the size of offsets - and lengths of objects, the number of entries in symbol tables - (used to store groups) and additional version information for the - file. - -

2. File access modes

- -

The HDF5 library assumes that all files are implicitly opened for read access at all times. Passing the H5F_ACC_RDWR parameter to H5Fopen() also allows write access to a file. H5Fcreate() assumes both read and write access; passing H5F_ACC_TRUNC forces the truncation of an existing file, otherwise H5Fcreate() will fail if the file already exists.

3. Creating, Opening, and Closing Files

- -

Files are created with the H5Fcreate() function, - and existing files can be accessed with H5Fopen(). Both - functions return an object ID which should be eventually released by - calling H5Fclose(). - -

-
hid_t H5Fcreate (const char *name, uintn - flags, hid_t create_properties, hid_t - access_properties) -
This function creates a new file with the specified name in - the current directory. The file is opened with read and write - permission, and if the H5F_ACC_TRUNC flag is set, - any current file is truncated when the new file is created. - If a file of the same name exists and the - H5F_ACC_TRUNC flag is not set (or the - H5F_ACC_EXCL bit is set), this function will - fail. Passing H5P_DEFAULT for the creation - and/or access property lists uses the library's default - values for those properties. Creating and changing the - values of a property list is documented further below. The - return value is an ID for the open file and it should be - closed by calling H5Fclose() when it's no longer - needed. A negative value is returned for failure. - -

-
hid_t H5Fopen (const char *name, uintn - flags, hid_t access_properties) -
This function opens an existing file with read permission - and write permission if the H5F_ACC_RDWR flag is - set. The access_properties is a file access property - list ID or H5P_DEFAULT for the default I/O access - parameters. Creating and changing the parameters for access - property lists is documented further below. Files which are opened - more than once return a unique identifier for each - H5Fopen() call and can be accessed through all - file IDs. The return value is an ID for the open file and it - should be closed by calling H5Fclose() when it's - no longer needed. A negative value is returned for failure. - -

-
herr_t H5Fclose (hid_t file_id) -
This function releases resources used by a file which was - opened by H5Fcreate() or H5Fopen(). After - closing a file the file_id should not be used again. This - function returns zero for success or a negative value for failure. - -

-
herr_t H5Fflush (hid_t object_id, - H5F_scope_t scope) -
This function will cause all buffers associated with a file - to be immediately flushed to the file. The object_id - can be any object which is associated with a file, including - the file itself. scope specifies whether the flushing - action is to be global or local. -
- -
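A minimal sketch using just the calls above with default property lists (the file name is arbitrary):

    hid_t file_id;

    /* Create a new file, truncating any existing file of the same name */
    file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    if (file_id >= 0) {
        /* ... create groups, datasets, and attributes here ... */
        H5Fflush(file_id, H5F_SCOPE_GLOBAL);   /* push buffered data out to disk */
        H5Fclose(file_id);
    }

    /* Reopen the same file, this time read-only */
    file_id = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    if (file_id >= 0)
        H5Fclose(file_id);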

4. File Property Lists

- -

Additional parameters to H5Fcreate() or - H5Fopen() are passed through property list - objects, which are created with the H5Pcreate() - function. These objects allow many parameters of a file's - creation or access to be changed from the default values. - Property lists are used as a portable and extensible method of - modifying multiple parameter values with simple API functions. - There are two kinds of file-related property lists, - namely file creation properties and file access properties. - -

4.1. File Creation Properties

- -

File creation property lists apply to H5Fcreate() only - and are used to control the file meta-data which is maintained - in the super block of the file. The parameters which can be - modified are: - -

-
User-Block Size
The user-block is a fixed length block of data located at the beginning of the file which is ignored by the HDF5 library and may be used to store any data or information found to be useful to applications. This value may be set to any power of two equal to 512 or greater (e.g. 512, 1024, 2048, etc). This parameter is set and queried with the H5Pset_userblock() and H5Pget_userblock() calls.

-
Offset and Length Sizes -
The number of bytes used to store the offset and length of - objects in the HDF5 file can be controlled with this - parameter. Values of 2, 4 and 8 bytes are currently - supported to allow 16-bit, 32-bit and 64-bit files to - be addressed. These parameters are set and queried - with the H5Pset_sizes() and - H5Pget_sizes() calls. - -

-
Symbol Table Parameters -
The size of symbol table B-trees can be controlled by setting - the 1/2 rank and 1/2 node size parameters of the B-tree. These - parameters are set and queried with the - H5Pset_sym_k() and H5Pget_sym_k() calls. - -

-
Indexed Storage Parameters -
The size of indexed storage B-trees can be controlled by - setting the 1/2 rank and 1/2 node size parameters of the B-tree. - These parameters are set and queried with the - H5Pset_istore_k() and H5Pget_istore_k() - calls. -
- -
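A sketch that sets a couple of these creation properties before creating a file (the specific values and file name are illustrative only):

    hid_t fcpl, file_id;

    fcpl = H5Pcreate(H5P_FILE_CREATE);
    H5Pset_userblock(fcpl, (hsize_t)1024);   /* reserve a 1 kB user block before the super block */
    H5Pset_sym_k(fcpl, 32, 8);               /* symbol table B-tree 1/2-rank and 1/2-node size */

    file_id = H5Fcreate("tuned.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
    /* ... */
    H5Fclose(file_id);
    H5Pclose(fcpl);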

4.2. File Access Property Lists

- -

File access property lists apply to H5Fcreate() or - H5Fopen() and are used to control different methods of - performing I/O on files. - -

-
Unbuffered I/O -
Local permanent files can be accessed with the functions described - in Section 2 of the Posix manual, namely open(), - lseek(), read(), write(), and - close(). The lseek64() function is used - on operating systems that support it. This driver is enabled and - configured with H5Pset_fapl_sec2(). - -

-
Buffered I/O -
Local permanent files can be accessed with the functions declared - in the standard C header file stdio.h, namely - fopen(), fseek(), fread(), - fwrite(), and fclose(). The - fseek64() function is used on operating systems that - support it. This driver is enabled and configured with - H5Pset_fapl_stdio(). - -

-
Memory I/O -
Local temporary files can be created and accessed directly from - memory without ever creating permanent storage. The library uses - malloc() and free() to create storage - space for the file. The total size of the file must be small enough - to fit in virtual memory. The name supplied to - H5Fcreate() is irrelevant, and H5Fopen() - will always fail. - -

-
Parallel Files using MPI I/O -
This driver allows parallel access to a file through the MPI I/O - library. The parameters which can be modified are the MPI - communicator, the info object, and the access mode. - The communicator and info object are saved and then - passed to MPI_File_open() during file creation or open. - The access_mode controls the kind of parallel access the application - intends. (Note that it is likely that the next API revision will - remove the access_mode parameter and have access control specified - via the raw data transfer property list of H5Dread() - and H5Dwrite().) These parameters are set and queried - with the H5Pset_fapl_mpi() and - H5Pget_fapl_mpi() calls. - -

-
Data Alignment -
Sometimes file access is faster if certain things are - aligned on file blocks. This can be controlled by setting - alignment properties of a file access property list with the - H5Pset_alignment() function. Any allocation - request at least as large as some threshold will be aligned on - an address which is a multiple of some number. -
- -
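For example, a sketch that aligns every allocation request of 1 MB or more on a 64 kB boundary (threshold, alignment, and file name are illustrative):

    hid_t fapl, file_id;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    /* Align any allocation request of at least 1 MB on a 64 kB boundary */
    H5Pset_alignment(fapl, (hsize_t)1048576, (hsize_t)65536);

    file_id = H5Fcreate("aligned.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    /* ... */
    H5Fclose(file_id);
    H5Pclose(fapl);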

5. Examples of using file property lists

- -

5.1. Example of using file creation property lists

- -

The following example shows how to create a file with 64-bit object offsets and lengths:
-

-        hid_t create_plist;
-        hid_t file_id;
-
-        create_plist = H5Pcreate(H5P_FILE_CREATE);
-        H5Pset_sizes(create_plist, 8, 8);
-
-        file_id = H5Fcreate("test.h5", H5F_ACC_TRUNC,
-                             create_plist, H5P_DEFAULT);
-        .
-        .
-        .
-        H5Fclose(file_id);
-    
- -

5.2. Example of using file access property lists

- -

The following example shows how to open an existing file for independent dataset access using MPI parallel I/O:
-

-        hid_t access_plist;
-        hid_t file_id;
-
-        access_plist = H5Pcreate(H5P_FILE_ACCESS);
-        H5Pset_fapl_mpi(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
-
-	/* H5Fopen must be called collectively */
-        file_id = H5Fopen("test.h5", H5F_ACC_RDWR, access_plist);
-        .
-        .
-        .
-	/* H5Fclose must be called collectively */
-        H5Fclose(file_id);
-        
- - -

6. Low-level File Drivers

- -

HDF5 is able to access its address space through various types of - low-level file drivers. For instance, an address space might - correspond to a single file on a Unix file system, multiple files on a - Unix file system, multiple files on a parallel file system, or a block - of memory within the application. Generally, an HDF5 address space is - referred to as an HDF5 file regardless of how the space is organized - at the storage level. - -

6.1. Unbuffered Permanent Files

- -

The sec2 driver uses functions from section 2 of the - Posix manual to access files stored on a local file system. These are - the open(), close(), read(), - write(), and lseek() functions. If the - operating system supports lseek64() then it is used instead - of lseek(). The library buffers meta data regardless of - the low-level driver, but using this driver prevents data from being - buffered again by the lowest layers of the HDF5 library. - -

-
hid_t H5Pget_driver (hid_t access_properties) -
This function returns the constant H5FD_SEC2 if the - sec2 driver is defined as the low-level driver for the - specified access property list. - -

-
herr_t H5Pset_fapl_sec2 - (hid_t access_properties) -
The file access properties are set to use the sec2 - driver. Any previously defined driver properties are erased from the - property list. Additional parameters may be added to this function in - the future. - -
- -

6.2. Buffered Permanent Files

- -

The stdio driver uses the functions declared in the - stdio.h header file to access permanent files in a local - file system. These are the fopen(), fclose(), - fread(), fwrite(), and fseek() - functions. If the operating system supports fseek64() then - it is used instead of fseek(). Use of this driver - introduces an additional layer of buffering beneath the HDF5 library. - -

-
hid_t H5Pget_driver(hid_t access_properties) -
This function returns the constant H5FD_STDIO if the - stdio driver is defined as the low-level driver for the - specified access property list. - -

-
herr_t H5Pset_fapl_stdio - (hid_t access_properties) -
The file access properties are set to use the stdio - driver. Any previously defined driver properties are erased from the - property list. Additional parameters may be added to this function in - the future. - -
- -

6.3. Buffered Temporary Files

- -

The core driver uses malloc() and free() to allocate space for a file in the heap. Reading and writing to a file of this type results in memory-to-memory copies instead of disk I/O and is therefore somewhat faster. However, the total file size must not exceed the amount of available virtual memory, and only one HDF5 file handle can access the file (because the name of such a file is insignificant and H5Fopen() always fails).

-
hid_t H5Pget_driver (hid_t access_properties) -
This function returns the constant H5FD_CORE if the - core driver is defined as the low-level driver for the - specified access property list. - -

-
herr_t H5Pset_fapl_core (hid_t access_properties, - size_t block_size, - hbool_t backing_store) -
The file access properties are set to use the core - driver and any previously defined driver properties are erased from - the property list. Memory for the file will always be allocated in - units of the specified block_size. Additional parameters may - be added to this function in the future. - -

-
herr_t H5Pget_fapl_core (hid_t access_properties, - size_t *block_size), - hbool_t *backing_store) -
If the file access property list is set to the core driver - then this function returns zero and block_size is set to the - block size used for the file; otherwise it returns a negative - value. In the future, additional arguments may be added to this - function to match those added to H5Pset_fapl_core(). -
- -
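A sketch that configures the core driver with a 64 kB allocation increment and no disk backing store (the values and file name are illustrative):

    hid_t fapl, file_id;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_core(fapl, (size_t)65536, 0);   /* grow the in-memory file 64 kB at a time */

    /* The file lives entirely in memory; the name is not significant */
    file_id = H5Fcreate("in_core.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    /* ... */
    H5Fclose(file_id);
    H5Pclose(fapl);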

6.4. Parallel Files

- -

This driver uses MPI I/O to provide parallel access to a file. - -

-
hid_t H5Pget_driver (hid_t access_properties) -
This function returns the constant H5FD_MPI if the - mpi driver is defined as the low-level driver for the - specified access property list. - -

-
herr_t H5Pset_fapl_mpi (hid_t access_properties, MPI_Comm - comm, MPI_info info) -
The file access properties are set to use the mpi - driver and any previously defined driver properties are erased from - the property list. Additional parameters may be added to this - function in the future. - -

-
herr_t H5Pget_fapl_mpi - (hid_t access_properties, - MPI_Comm *comm, - MPI_info *info) -
If the file access property list is set to the mpi driver - then this function returns zero and comm, and info - are set to the values stored in the property - list; otherwise the function returns a negative value. In the future, - additional arguments may be added to this function to match those - added to H5Pset_fapl_mpi(). -
- - -

6.5. File Families

-
- -

A single HDF5 address space may be split into multiple files which, - together, form a file family. Each member of the family must be the - same logical size although the size and disk storage reported by - ls(1) may be substantially smaller. The name passed to - H5Fcreate() or H5Fopen() should include a - printf(3c) style integer format specifier which will be - replaced with the family member number (the first family member is - zero). - -

Any HDF5 file can be split into a family of files by running - the file through split(1) and numbering the output - files. However, because HDF5 is lazy about extending the size - of family members, a valid file cannot generally be created by - concatenation of the family members. Additionally, - split and cat don't attempt to - generate files with holes. The h5repart program - can be used to repartition an HDF5 file or family into another - file or family and preserves holes in the files. - -

-
h5repart [-v] [-b - block_size[suffix]] [-m - member_size[suffix]] source - destination -
This program repartitions an HDF5 file by copying the source - file or family to the destination file or family preserving - holes in the underlying Unix files. Families are used for the - source and/or destination if the name includes a - printf-style integer format such as "%d". The - -v switch prints input and output file names on - the standard error stream for progress monitoring, - -b sets the I/O block size (the default is 1kB), - and -m sets the output member size if the - destination is a family name (the default is 1GB). The block - and member sizes may be suffixed with the letters - g, m, or k for GB, MB, - or kB respectively. - -

-
hid_t H5Pget_driver (hid_t access_properties) -
This function returns the constant H5FD_FAMILY if - the family driver is defined as the low-level driver for the - specified access property list. - -

-
herr_t H5Pset_fapl_family (hid_t access_properties, - hsize_t memb_size, hid_t member_properties) -
The file access properties are set to use the family - driver and any previously defined driver properties are erased - from the property list. Each member of the file family will - use member_properties as its file access property - list. The memb_size argument gives the logical size - in bytes of each family member but the actual size could be - smaller depending on whether the file contains holes. The - member size is only used when creating a new file or - truncating an existing file; otherwise the member size comes - from the size of the first member of the family being - opened. Note: if the size of the off_t type is - four bytes then the maximum family member size is usually - 2^31-1 because the byte at offset 2,147,483,647 is generally - inaccessible. Additional parameters may be added to this - function in the future. - -

-
herr_t H5Pget_fapl_family (hid_t access_properties, - hsize_t *memb_size, - hid_t *member_properties) -
If the file access property list is set to the family - driver then this function returns zero; otherwise the function - returns a negative value. On successful return, - access_properties will point to a copy of the member - access property list which should be closed by calling - H5Pclose() when the application is finished with - it. If memb_size is non-null then it will contain - the logical size in bytes of each family member. In the - future, additional arguments may be added to this function to - match those added to H5Pset_fapl_family(). -
- -
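A sketch creating a file family with 100 MB members; the "%05d" in the name is replaced by the member number, so the first member is family00000.h5 (names and sizes are illustrative):

    hid_t fapl, file_id;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    /* Each member holds at most 100 MB of the address space;
     * the members themselves use the default access properties. */
    H5Pset_fapl_family(fapl, (hsize_t)100*1024*1024, H5P_DEFAULT);

    file_id = H5Fcreate("family%05d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    /* ... */
    H5Fclose(file_id);
    H5Pclose(fapl);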

6.6. Split Meta/Raw Files

- -

On occasion, it might be useful to separate meta data from raw - data. The split driver does this by creating two files: one for - meta data and another for raw data. The application provides a base - file name to H5Fcreate() or H5Fopen() and this - driver appends a file extension which defaults to .meta for - the meta data file and .raw for the raw data file. - Each file can have its own - file access property list which allows, for instance, a split file with - meta data stored with the core driver and raw data stored with - the sec2 driver. - -

-
hid_t H5Pget_driver (hid_t access_properties) -
This function returns the constant H5FD_SPLIT if - the split driver is defined as the low-level driver for the - specified access property list. - -

-
herr_t H5Pset_fapl_split (hid_t access_properties, - const char *meta_extension, - hid_t meta_properties, const char *raw_extension, - hid_t raw_properties) -
The file access properties are set to use the split - driver and any previously defined driver properties are erased from - the property list. The meta file will have a name which is formed by - adding meta_extension (or .meta) to the end of - the base name and will be accessed according to the - meta_properties. The raw file will have a name which is - formed by appending raw_extension (or .raw) to the base - name and will be accessed according to the raw_properties. - Additional parameters may be added to this function in the future. - -
- - -
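A sketch selecting the split driver with the default extensions, so a base name of "split_base" produces split_base.meta and split_base.raw (names are illustrative):

    hid_t fapl, file_id;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    /* Meta data goes to "<base>.meta", raw data to "<base>.raw";
     * both halves use default access properties here. */
    H5Pset_fapl_split(fapl, ".meta", H5P_DEFAULT, ".raw", H5P_DEFAULT);

    file_id = H5Fcreate("split_base", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    /* ... */
    H5Fclose(file_id);
    H5Pclose(fapl);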
- -Last modified: 26 April 2001 - - - - diff --git a/doc/html/Filters.html b/doc/html/Filters.html deleted file mode 100644 index a253cfb..0000000 --- a/doc/html/Filters.html +++ /dev/null @@ -1,593 +0,0 @@ - - - - Filters - - - - - - - - - - -

Filters in HDF5

- - Note: Transient pipelines described in this document have not - been implemented. - -

1. Introduction

- -

HDF5 allows chunked data1 to pass through user-defined filters on the way to or from disk. The filters operate on chunks of an H5D_CHUNKED dataset and can be arranged in a pipeline so that the output of one filter becomes the input of the next.

Each filter has a two-byte identification number (type H5Z_filter_t) allocated by NCSA, and each filter can also be passed application-defined integer values to control its behavior. Each filter also has an optional ASCII comment string.

-

- - - - - - - - - - - - - - - - - - - - - -
- Values for H5Z_filter_t -
Value / Description
0-255: These values are reserved for filters predefined and registered by the HDF5 library and of use to the general public. They are described in a separate section below.
256-511: Filter numbers in this range are used for testing only and can be used temporarily by any organization. No attempt is made to resolve numbering conflicts since all definitions are by nature temporary.
512-65535: Reserved for future assignment. Please contact the HDF5 development team to reserve a value or range of values for use by your filters.
-
- -

2. Defining and Querying the Filter Pipeline

- -

Two types of filters can be applied to raw data I/O: permanent filters and transient filters. The permanent filter pipeline is defined when the dataset is created while the transient pipeline is defined for each I/O operation. During an H5Dwrite() the transient filters are applied first in the order defined and then the permanent filters are applied in the order defined. For an H5Dread() the opposite order is used: permanent filters in reverse order, then transient filters in reverse order. An H5Dread() must result in the same amount of data for a chunk as the original H5Dwrite().

The permanent filter pipeline is defined by calling - H5Pset_filter() for a dataset creation property - list while the transient filter pipeline is defined by calling - that function for a dataset transfer property list. - -

-
herr_t H5Pset_filter (hid_t plist, - H5Z_filter_t filter, unsigned int flags, - size_t cd_nelmts, const unsigned int - cd_values[]) -
This function adds the specified filter and - corresponding properties to the end of the transient or - permanent output filter pipeline (depending on whether - plist is a dataset creation or dataset transfer - property list). The flags argument specifies certain - general properties of the filter and is documented below. The - cd_values is an array of cd_nelmts integers - which are auxiliary data for the filter. The integer values - will be stored in the dataset object header as part of the - filter information. - -

-
int H5Pget_nfilters (hid_t plist) -
This function returns the number of filters defined in the - permanent or transient filter pipeline depending on whether - plist is a dataset creation or dataset transfer - property list. In each pipeline the filters are numbered from - 0 through N-1 where N is the value returned - by this function. During output to the file the filters of a - pipeline are applied in increasing order (the inverse is true - for input). Zero is returned if there are no filters in the - pipeline and a negative value is returned for errors. - -

-
H5Z_filter_t H5Pget_filter (hid_t plist, - int filter_number, unsigned int *flags, - size_t *cd_nelmts, unsigned int - *cd_values, size_t namelen, char name[]) -
This is the query counterpart of - H5Pset_filter() and returns information about a - particular filter number in a permanent or transient pipeline - depending on whether plist is a dataset creation or - dataset transfer property list. On input, cd_nelmts - indicates the number of entries in the cd_values - array allocated by the caller while on exit it contains the - number of values defined by the filter. The - filter_number should be a value between zero and - N-1 as described for H5Pget_nfilters() - and the function will return failure (a negative value) if the - filter number is out of range. If name is a pointer - to an array of at least namelen bytes then the filter - name will be copied into that array. The name will be null - terminated if the namelen is large enough. The - filter name returned will be the name appearing in the file or - else the name registered for the filter or else an empty string. -
- -

The flags argument to the functions above is a bit vector of - the following fields: - -

-

- - - - - - - - - - - - -
- Values for the flags argument -
Value / Description
H5Z_FLAG_OPTIONAL: If this bit is set then the filter is optional. If the filter fails (see below) during an H5Dwrite() operation then the filter is just excluded from the pipeline for the chunk for which it failed; the filter will not participate in the pipeline during an H5Dread() of the chunk. This is commonly used for compression filters: if the compression result would be larger than the input then the compression filter returns failure and the uncompressed data is stored in the file. If this bit is clear and a filter fails then the H5Dwrite() or H5Dread() also fails.
-
- -
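A sketch of walking an existing pipeline with the query functions above, assuming dcpl is a dataset creation property list obtained elsewhere (and using the H5Pget_filter() signature given in this guide; later releases extended it):

    int          i, nfilters;
    unsigned int flags, cd_values[8];
    size_t       cd_nelmts;
    char         name[64];
    H5Z_filter_t filter_id;

    nfilters = H5Pget_nfilters(dcpl);
    for (i = 0; i < nfilters; i++) {
        cd_nelmts = 8;   /* capacity of cd_values[] on input */
        filter_id = H5Pget_filter(dcpl, i, &flags, &cd_nelmts,
                                  cd_values, sizeof(name), name);
        printf("filter %d: id=%d name=\"%s\" optional=%d nvalues=%lu\n",
               i, (int)filter_id, name,
               (flags & H5Z_FLAG_OPTIONAL) ? 1 : 0,
               (unsigned long)cd_nelmts);
    }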

3. Defining Filters

- -

Each filter is bidirectional, handling both input and output to - the file, and a flag is passed to the filter to indicate the - direction. In either case the filter reads a chunk of data from - a buffer, usually performs some sort of transformation on the - data, places the result in the same or new buffer, and returns - the buffer pointer and size to the caller. If something goes - wrong the filter should return zero to indicate a failure. - -

During output, a filter that fails or isn't defined and is - marked as optional is silently excluded from the pipeline and - will not be used when reading that chunk of data. A required - filter that fails or isn't defined causes the entire output - operation to fail. During input, any filter that has not been - excluded from the pipeline during output and fails or is not - defined will cause the entire input operation to fail. - -

Filters are defined in two phases. The first phase is to - define a function to act as the filter and link the function - into the application. The second phase is to register the - function, associating the function with an - H5Z_filter_t identification number and a comment. - -

-
typedef size_t (*H5Z_func_t)(unsigned int - flags, size_t cd_nelmts, const unsigned int - cd_values[], size_t nbytes, size_t - *buf_size, void **buf) -
The flags, cd_nelmts, and - cd_values are the same as for the - H5Pset_filter() function with the additional flag - H5Z_FLAG_REVERSE which is set when the filter is - called as part of the input pipeline. The input buffer is - pointed to by *buf and has a total size of - *buf_size bytes but only nbytes are valid - data. The filter should perform the transformation in place if - possible and return the number of valid bytes or zero for - failure. If the transformation cannot be done in place then - the filter should allocate a new buffer with - malloc() and assign it to *buf, - assigning the allocated size of that buffer to - *buf_size. The old buffer should be freed - by calling free(). - -

-
herr_t H5Zregister (H5Z_filter_t filter_id, - const char *comment, H5Z_func_t - filter) -
The filter function is associated with a filter - number and a short ASCII comment which will be stored in the - hdf5 file if the filter is used as part of a permanent - pipeline during dataset creation. -
- - -

4. Predefined Filters

- -

If zlib version 1.1.2 or later was found during configuration then the library will define a filter whose H5Z_filter_t number is H5Z_FILTER_DEFLATE. Since this compression method has the potential for generating compressed data which is larger than the original, the H5Z_FLAG_OPTIONAL flag should be turned on so such cases can be handled gracefully by storing the original data instead of the compressed data. The cd_nelmts should be one, with cd_values[0] being a compression aggression level between zero and nine, inclusive (zero is the fastest compression while nine results in the best compression ratio).

A convenience function for adding the - H5Z_FILTER_DEFLATE filter to a pipeline is: - -

-
herr_t H5Pset_deflate (hid_t plist, unsigned - aggression) -
The deflate compression method is added to the end of the - permanent or transient filter pipeline depending on whether - plist is a dataset creation or dataset transfer - property list. The aggression is a number between - zero and nine (inclusive) to indicate the tradeoff between - speed and compression ratio (zero is fastest, nine is best - ratio). -
- -
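For example, a sketch that creates a chunked, deflate-compressed dataset, assuming file and space are an open file and dataspace (the dataset name, chunk shape, and aggression level are illustrative):

    hid_t   dcpl, dset;
    hsize_t chunk_size[2] = {100, 100};

    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk_size);   /* deflate requires chunked storage */
    H5Pset_deflate(dcpl, 6);             /* aggression level 6 */

    dset = H5Dcreate(file, "compressed", H5T_NATIVE_FLOAT, space, dcpl);
    /* ... H5Dwrite() passes each chunk through the deflate filter ... */
    H5Dclose(dset);
    H5Pclose(dcpl);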

Even if the zlib isn't detected during - configuration the application can define - H5Z_FILTER_DEFLATE as a permanent filter. If the - filter is marked as optional (as with - H5Pset_deflate()) then it will always fail and be - automatically removed from the pipeline. Applications that read - data will fail only if the data is actually compressed; they - won't fail if H5Z_FILTER_DEFLATE was part of the - permanent output pipeline but was automatically excluded because - it didn't exist when the data was written. - -

zlib can be acquired from - http://www.cdrom.com/pub/infozip/zlib/. - -

5. Example

- -

This example shows how to define and register a simple filter - that adds a checksum capability to the data stream. - -

The function that acts as the filter always returns zero (failure) if the md5() function was not detected at configuration time (left as an exercise for the reader). Otherwise the function is broken down into an input and an output half. The output half calculates a checksum, increases the size of the output buffer if necessary, and appends the checksum to the end of the buffer. The input half calculates the checksum on the first part of the buffer and compares it to the checksum already stored at the end of the buffer. If the two differ then zero (failure) is returned, otherwise the buffer size is reduced to exclude the checksum.

-

- - - - -
-

-
-size_t
-md5_filter(unsigned int flags, size_t cd_nelmts,
-           const unsigned int cd_values[], size_t nbytes,
-           size_t *buf_size, void **buf)
-{
-#ifdef HAVE_MD5
-    unsigned char       cksum[16];
-
-    if (flags & H5Z_REVERSE) {
-        /* Input */
-        assert(nbytes>=16);
-        md5(nbytes-16, *buf, cksum);
-
-        /* Compare */
-        if (memcmp(cksum, (char*)(*buf)+nbytes-16, 16)) {
-            return 0; /*fail*/
-        }
-
-        /* Strip off checksum */
-        return nbytes-16;
-            
-    } else {
-        /* Output */
-        md5(nbytes, *buf, cksum);
-
-        /* Increase buffer size if necessary */
-        if (nbytes+16>*buf_size) {
-            *buf_size = nbytes + 16;
-            *buf = realloc(*buf, *buf_size);
-        }
-
-        /* Append checksum */
-        memcpy((char*)(*buf)+nbytes, cksum, 16);
-        return nbytes+16;
-    }
-#else
-    return 0; /*fail*/
-#endif
-}
-	      
-
-
- -

Once the filter function is defined it must be registered so - the HDF5 library knows about it. Since we're testing this - filter we choose one of the H5Z_filter_t numbers - from the reserved range. We'll randomly choose 305. - -

-

- - - - -
-

-
-#define FILTER_MD5 305
-herr_t status = H5Zregister(FILTER_MD5, "md5 checksum", md5_filter);
-	      
-
-
- -

Now we can use the filter in a pipeline. We could have added the filter to the pipeline before defining or registering the filter as long as the filter was defined and registered by the time we tried to use it (if the filter is marked as optional then we could have used it without defining it and the library would have automatically removed it from the pipeline for each chunk written before the filter was defined and registered).

-

- - - - -
-

-
-hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
-hsize_t chunk_size[3] = {10,10,10};
-H5Pset_chunk(dcpl, 3, chunk_size);
-H5Pset_filter(dcpl, FILTER_MD5, 0, 0, NULL);
-hid_t dset = H5Dcreate(file, "dset", H5T_NATIVE_DOUBLE, space, dcpl);
-	      
-
-
- -

6. Filter Diagnostics

- -

If the library is compiled with debugging turned on for the H5Z - layer (usually as a result of configure - --enable-debug=z) then filter statistics are printed when - the application exits normally or the library is closed. The - statistics are written to the standard error stream and include - two lines for each filter that was used: one for input and one - for output. The following fields are displayed: - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field Name / Description
Method: This is the name of the method as defined with H5Zregister(), with the characters "<" or ">" prepended to indicate input or output.
Total: The total number of bytes processed by the filter including errors. This is the maximum of the nbytes argument or the return value.
Errors: This field shows the number of bytes of the Total column which can be attributed to errors.
User, System, Elapsed: These are the amount of user time, system time, and elapsed time in seconds spent in the filter function. Elapsed time is sensitive to system load. These times may be zero on operating systems that don't support the required operations.
Bandwidth: This is the filter bandwidth, which is the total number of bytes processed divided by elapsed time. Since elapsed time is subject to system load the bandwidth numbers cannot always be trusted. Furthermore, the bandwidth includes bytes attributed to errors, which may significantly taint the value if the function is able to detect errors without much expense.
-
- -

-

- - - - - -
- Example: Filter Statistics -
-

-H5Z: filter statistics accumulated over life of library:
-   Method     Total  Errors  User  System  Elapsed Bandwidth
-   ------     -----  ------  ----  ------  ------- ---------
-   >deflate  160000   40000  0.62    0.74     1.33 117.5 kBs
-   <deflate  120000       0  0.11    0.00     0.12 1.000 MBs
-	      
-
-
- - -
- - -

Footnote 1: Dataset chunks can be compressed - through the use of filters. Developers should be aware that - reading and rewriting compressed chunked data can result in holes - in an HDF5 file. In time, enough such holes can increase the - file size enough to impair application or library performance - when working with that file. See - “Freespace Management” - in the chapter - “Performance Analysis and Issues.”

- - -
- -Last modified: 2 August 2001 - - - - - diff --git a/doc/html/Glossary.html b/doc/html/Glossary.html deleted file mode 100644 index fd32c97..0000000 --- a/doc/html/Glossary.html +++ /dev/null @@ -1,573 +0,0 @@ - -HDF5 Glossary - - - - - - - - - - - -

HDF5 Glossary

-
- -
- - - - -
-
-
atomic datatype -
attribute - -
chunked layout -
chunking - -
compound datatype - -
contiguous layout - -
dataset -
dataspace - - - - -
datatype -
-
atomic - - -
compound - - -
enumeration -
named -
opaque - -
variable-length -
- - - - -
enumeration datatype -
file -
- - -
group -
path -
root group -
super block -
-
-
-
-
file access mode -
group -
- -
member -
root group -
-
hard link - -
hyperslab -
identifier -
link -
-
hard -
soft - - -
-
member -
name -
named datatype -
opaque datatype -
path - -
property list -
-
data transfer -
dataset access -
dataset creation -
file access -
file creation -
-
-
-
-
root group -
selection -
-
hyperslab - -
-
serialization -
soft link - -
storage layout -
-
chunked -
chunking -
contiguous -
-
super block - - - -
variable-length datatype -
-
-
- -
- - -
- -
atomic datatype -
A datatype which cannot be decomposed into smaller units at the - API level. -

- -

attribute -
A small dataset that can be used to describe the nature and/or - the intended usage of the object it is attached to. -

- - - -

chunked layout -
The storage layout of a chunked dataset. -

- -

chunking -
A storage layout where a dataset is partitioned into fixed-size - multi-dimensional chunks. Chunking tends to improve performance - and facilitates dataset extensibility. -

- -

compound datatype -
A collection of one or more atomic types or small arrays of such types. - Similar to a struct in C or a common block in Fortran. -

- - - -

contiguous layout -
The storage layout of a dataset that is not chunked, so that the entire - data portion of the dataset is stored in a single contiguous block. -

- -

data transfer property list -
The data transfer property list is used to control various aspects - of the I/O, such as caching hints or collective I/O information. -

- -

dataset -
A multi-dimensional array of data elements, together with - supporting metadata. -

- - -

dataset access property list -
A property list containing information on how a dataset is to be accessed. -

- -

dataset creation property list -
A property list containing information on how - raw data is organized on disk and how the raw data is compressed. - -

- -

dataspace -
An object that describes the dimensionality of the data array. - A dataspace is either a regular N-dimensional array of data points, - called a simple dataspace, or a more general collection of data points - organized in another manner, called a complex dataspace. -

- -

datatype -
An object that describes the storage format of the individual data - points of a data set. - There are two categories of datatypes: atomic and compound datatypes. - An atomic type is a type which cannot be decomposed into smaller - units at the API level. A compound datatype is a collection of one or - more atomic types or small arrays of such types. -

- - - - - -

enumeration datatype -
A one-to-one mapping between a set of symbols and a set of - integer values, and an order is imposed on the symbols by their - integer values. The symbols are passed between the application - and library as character strings and all the values for a - particular enumeration datatype are of the same integer type, - which is not necessarily a native type. -

- -

file -
A container for storing grouped collections of - multi-dimensional arrays containing scientific data. -

- -

file access mode -
Determines whether an existing file will be overwritten, - opened for read-only access, or opened for read/write access. - All newly created files are opened for both reading and - writing. - -

- -

file access property list -
File access property lists are used to control different methods - of performing I/O on files: - -

- -

file creation property list -
The property list used to control file metadata. - -

- -

group -
A structure containing zero or more HDF5 objects, - together with supporting metadata. - The two primary HDF5 objects are datasets and groups. -

- -

hard link -
A direct association between a name and the object where both exist - in a single HDF5 address space. -

- - - -

hyperslab -
A portion of a dataset. A hyperslab selection can be a - logically contiguous collection of points in a dataspace or - a regular pattern of points or blocks in a dataspace. -

- -

identifier -
A unique entity provided by the HDF5 library and used to access - an HDF5 object, such as a file, group, dataset, datatype, etc. -

- -

link -
An association between a name and the object in an HDF5 file group. -

- -

member -
A group or dataset that is in another group, group A, - is a member of group A. -

- -

name -
A slash-separated list of components that uniquely identifies an - element of an HDF5 file. A name that begins with a slash - is an absolute name, which is accessed beginning with the root group - of the file; all other names are relative names and the associated - objects are accessed beginning with the current or specified group. -

- -

named datatype -
A datatype that is named and stored in a file. Naming is permanent; - a datatype cannot be changed after being named. -

- -

opaque datatype -
A mechanism for describing data which cannot be otherwise described - by HDF5. The only properties associated with opaque types are a - size in bytes and an ASCII tag. -

- - - -

path -
The slash-separated list of components that forms the name - uniquely identifying an element of an HDF5 file. -

- -

property list -
A collection of name/value pairs that can be passed to other - HDF5 functions to control features that are typically unimportant - or whose default values are usually used. -

- -

root group -
The group that is the entry point to the group graph in an HDF5 file. - Every HDF5 file has exactly one root group. -

- -

selection -
(1) A subset of a dataset or a dataspace, up to the entire dataset or - dataspace. - (2) The elements of an array or dataset that are marked for I/O. -

- -

serialization -
The flattening of an N-dimensional data object into a - 1-dimensional object so that, for example, the data object can be - transmitted over the network as a 1-dimensional bitstream. -

- -

soft link -
An indirect association between a name and an object in an - HDF5 file group. -

- -

storage layout -
The manner in which a dataset is stored, either contiguous or - chunked, in the HDF5 file. -

- -

super block -
A block of data containing the information required to portably access - HDF5 files on multiple platforms, followed by information about the groups - and datasets in the file. - The super block contains information about the size of offsets, - lengths of objects, the number of entries in group tables, - and additional version information for the file. -

- - - -

variable-length datatype -
A sequence of an existing datatype (atomic, variable-length (VL), - or compound) whose length is not fixed from one dataset location - to another. -

- -

- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - diff --git a/doc/html/Graphics/C++.gif b/doc/html/Graphics/C++.gif deleted file mode 100755 index 120b7cc..0000000 Binary files a/doc/html/Graphics/C++.gif and /dev/null differ diff --git a/doc/html/Graphics/FORTRAN.gif b/doc/html/Graphics/FORTRAN.gif deleted file mode 100755 index d08a451..0000000 Binary files a/doc/html/Graphics/FORTRAN.gif and /dev/null differ diff --git a/doc/html/Graphics/Java.gif b/doc/html/Graphics/Java.gif deleted file mode 100755 index a064d1d..0000000 Binary files a/doc/html/Graphics/Java.gif and /dev/null differ diff --git a/doc/html/Graphics/Makefile.am b/doc/html/Graphics/Makefile.am deleted file mode 100644 index 3e65c67..0000000 --- a/doc/html/Graphics/Makefile.am +++ /dev/null @@ -1,17 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir=$(docdir)/hdf5/Graphics - -# Public doc files (to be installed)... -localdoc_DATA=C++.gif FORTRAN.gif Java.gif OtherAPIs.gif diff --git a/doc/html/Graphics/Makefile.in b/doc/html/Graphics/Makefile.in deleted file mode 100644 index 50d0abf..0000000 --- a/doc/html/Graphics/Makefile.in +++ /dev/null @@ -1,485 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. 
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/Graphics -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. -ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ 
-F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our 
tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/Graphics - -# Public doc files (to be installed)... -localdoc_DATA = C++.gif FORTRAN.gif Java.gif OtherAPIs.gif -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/Graphics/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/Graphics/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/Graphics/OtherAPIs.gif b/doc/html/Graphics/OtherAPIs.gif deleted file mode 100755 index 8ae8902..0000000 Binary files a/doc/html/Graphics/OtherAPIs.gif and /dev/null differ diff --git a/doc/html/Groups.html b/doc/html/Groups.html deleted file mode 100644 index 2941008..0000000 --- a/doc/html/Groups.html +++ /dev/null @@ -1,404 +0,0 @@ - - - - Group Interface (H5G) - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

The Group Interface (H5G)

- -

1. Introduction

- -

An object in HDF5 consists of an object header at a fixed file - address that contains messages describing various properties of - the object such as its storage location, layout, compression, - etc. and some of these messages point to other data such as the - raw data of a dataset. The address of the object header is also - known as an OID and HDF5 has facilities for translating - names to OIDs. - -

Every HDF5 object has at least one name, and a set of names can - be stored together in a group. Each group implements a name - space where names may be of any length and must be unique with respect to - other names in the group. -

Since a group is a type of HDF5 object it has an object header - and a name which exists as a member of some other group. In this - way, groups can be linked together to form a directed graph. - One particular group is called the Root Group and is - the group to which the HDF5 file super block points. Its name is - "/" by convention. The full name of an object is - created by joining component names with slashes much like Unix. - -

-

- Group Graph Example -
- -

However, unlike Unix which arranges directories hierarchically, - HDF5 arranges groups in a directed graph. Therefore, there is - no ".." entry in a group since a group can have more than one - parent. There is no "." entry either but the library understands - it internally. - -

2. Names

- -

HDF5 places few restrictions on names: component names may be - any length except zero and may contain any character except - slash ("/") and the null terminator. A full name may be - composed of any number of component names separated by slashes, - with any of the component names being the special name ".". A - name which begins with a slash is an absolute name - which is looked up beginning at the root group of the file while - all other relative names are looked up beginning at the - specified group. - Multiple consecutive slashes in a full name are treated as - single slashes and trailing slashes are not significant. A - special case is the name "/" (or equivalent) which refers to the - root group. - -

Functions which operate on names generally take a location - identifier which is either a file ID or a group ID and perform - the lookup with respect to that location. Some possibilities - are: - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Location TypeObject NameDescription
File ID/foo/barThe object bar in group foo - in the root group.
Group ID/foo/barThe object bar in group foo - in the root group of the file containing the specified - group. In other words, the group ID's only purpose is - to supply a file.
File ID/The root group of the specified file.
Group ID/The root group of the file containing the specified - group.
File IDfoo/barThe object bar in group foo - in the specified group.
Group IDfoo/barThe object bar in group foo - in the specified group.
File ID.The root group of the file.
Group ID.The specified group.
Other ID.The specified object.
-
- -

Note, however, that object names within a group must be unique. - For example, H5Dcreate returns an error if a - dataset with the dataset name specified in the parameter list - already exists at the location specified in the parameter list. - - -
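As an illustration of the lookup rules above, here is a hedged sketch using the two-argument H5Gopen/H5Dopen calls of this release; the path /foo/bar is hypothetical:

#include "hdf5.h"

/* Open the same hypothetical dataset /foo/bar two ways: with an absolute
 * name looked up from the file, and with a relative name from a group ID. */
static void name_lookup_example(hid_t file)
{
    hid_t grp, dset_abs, dset_rel;

    dset_abs = H5Dopen(file, "/foo/bar");   /* absolute name: lookup starts at the root group */

    grp      = H5Gopen(file, "/foo");
    dset_rel = H5Dopen(grp, "bar");         /* relative name: lookup starts at the group /foo */

    H5Dclose(dset_rel);
    H5Dclose(dset_abs);
    H5Gclose(grp);
}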

3. Creating, Opening, and Closing Groups

- -

Groups are created with the H5Gcreate() function, - and existing groups can be accessed with - H5Gopen(). Both functions return an object ID which - should eventually be released by calling - H5Gclose(). -

-
hid_t H5Gcreate (hid_t location_id, const char - *name, size_t size_hint) -
This function creates a new group with the specified - name at the specified location which is either a file ID or a - group ID. The name must not already be taken by some other - object and all parent groups must already exist. The - size_hint is a hint for the number of bytes to - reserve to store the names which will be eventually added to - the new group. Passing a value of zero for size_hint - is usually adequate since the library is able to dynamically - resize the name heap, but a correct hint may result in better - performance. The return value is a handle for the open group - and it should be closed by calling H5Gclose() - when it's no longer needed. A negative value is returned for - failure. - -

-
hid_t H5Gopen (hid_t location_id, const char - *name) -
This function opens an existing group with the specified - name at the specified location which is either a file ID or a - group ID and returns an object ID. The object ID should be - released by calling H5Gclose() when it is no - longer needed. A negative value is returned for failure. - -

-
herr_t H5Gclose (hid_t group_id) -
This function releases resources used by a group which was - opened by H5Gcreate() or - H5Gopen(). After closing a group, the - group_id should not be used again. This function - returns zero for success or a negative value for failure. -
- -
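A minimal sketch of these calls, using the signatures as documented above; the file name example.h5 is purely illustrative:

#include "hdf5.h"

int main(void)
{
    hid_t file, grp;

    file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

    /* Create /data; a size_hint of 0 lets the library size the name heap. */
    grp = H5Gcreate(file, "/data", 0);
    if (grp < 0)
        return 1;                 /* a negative return value indicates failure */
    H5Gclose(grp);

    /* Re-open the existing group, then release it again. */
    grp = H5Gopen(file, "/data");
    H5Gclose(grp);

    H5Fclose(file);
    return 0;
}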

4. Objects with Multiple Names

- -

An object (including a group) can have more than one - name. Creating the object gives it the first name, and then - functions described here can be used to give it additional - names. The association between a name and the object is called - a link and HDF5 supports two types of links: a hard - link is a direct association between the name and the - object where both exist in a single HDF5 address space, and a - soft link is an indirect association. - -

-

- Hard Link Example -
- -

-

- Soft Link Example -
- -
-
Object Creation
-
The creation of an object creates a hard link which is - indistinguishable from other hard links that might be added - later. - -

-
herr_t H5Glink (hid_t file_id, H5G_link_t - link_type, const char *current_name, - const char *new_name) -
Creates a new name for an object that has some current name - (possibly one of many names it currently has). If the - link_type is H5G_LINK_HARD then a new - hard link is created. Otherwise, if link_type is - H5G_LINK_SOFT, a soft link is created which is an - alias for the current_name. When creating a soft - link, the object need not exist. This function returns zero - for success or negative for failure. -

-
herr_t H5Gunlink (hid_t file_id, const char - *name) -
This function removes an association between a name and an - object. Object headers keep track of how many hard links refer - to the object and when the hard link count reaches zero the - object can be removed from the file (but objects which are - open are not removed until all handles to the object are - closed). -
- -
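A short sketch of the two link types, assuming an already-open file identifier file and an existing dataset /dset (both names hypothetical):

#include "hdf5.h"

static void link_example(hid_t file)
{
    /* Hard link: a second name for the same object. */
    H5Glink(file, H5G_LINK_HARD, "/dset", "/dset_alias");

    /* Soft link: an alias by name; the target need not exist yet. */
    H5Glink(file, H5G_LINK_SOFT, "/dset", "/dset_soft");

    /* Removing one name leaves the object reachable through its other
     * hard link; the hard link count in the object header drops by one. */
    H5Gunlink(file, "/dset_alias");
}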

5. Comments

- -

Objects can have a comment associated with them. The comment - is set and queried with these two functions: - -

-
herr_t H5Gset_comment (hid_t loc_id, const - char *name, const char *comment) -
The previous comment (if any) for the specified object is - replaced with a new comment. If the comment argument - is the empty string or a null pointer, then the comment message - is removed from the object. Comments should be relatively - short, null-terminated, ASCII strings. -

-
herr_t H5Gget_comment (hid_t loc_id, const - char *name, size_t bufsize, char - *comment) -
The comment string for an object is returned through the - comment buffer. At most bufsize characters - including a null terminator are copied, and the result is - not null terminated if the comment is longer than the supplied - buffer. If an object doesn't have a comment then the empty - string is returned. -
- - -
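For example, a brief sketch assuming an open file identifier file and an existing group /data:

#include <stdio.h>
#include "hdf5.h"

static void comment_example(hid_t file)
{
    char buf[64];

    /* Attach (or replace) a short ASCII comment on the group /data. */
    H5Gset_comment(file, "/data", "raw sensor readings");

    /* Read it back; at most sizeof(buf) characters, including the null
     * terminator, are copied into buf. */
    H5Gget_comment(file, "/data", sizeof(buf), buf);
    printf("comment: %s\n", buf);
}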

6. Unlinking Datasets with H5Gmove and H5Gunlink

-
- -

Exercise caution in the use of H5Gmove and - H5Gunlink. - -

Note that H5Gmove and H5Gunlink - each include a step that unlinks pointers to a dataset or group. - If the link that is removed is on the only path leading - to a dataset or group, that dataset or group will become - inaccessible in the file. -

Consider the following example. Assume that the group - group2 can only be accessed via the following path, - where top_group is a member of the file's root group: -

-              /top_group/group1/group2/ 
- Using H5Gmove, top_group is renamed - to be a member of group2. At this point, since - top_group was the only route from the root group - to group1, there is no longer a path by which - one can access group1, group2, or - any member datasets. - top_group and any member datasets have also - become inaccessible.
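In code, the problematic call looks roughly like the following cautionary sketch; the names follow the example path above, and the file identifier file is assumed to be open:

#include "hdf5.h"

static void inaccessible_example(hid_t file)
{
    /* Before the call, group2 is reachable only via /top_group/group1/group2. */
    H5Gmove(file, "/top_group", "/top_group/group1/group2/top_group");

    /* The only link to top_group now lives inside group2, which was itself
     * reachable only through top_group, so group1, group2, and any member
     * datasets can no longer be reached by name. */
}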

- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 1 November 2000 - - - - - diff --git a/doc/html/H5.api_map.html b/doc/html/H5.api_map.html deleted file mode 100644 index c35102a..0000000 --- a/doc/html/H5.api_map.html +++ /dev/null @@ -1,849 +0,0 @@ - -HDF5 Legacy API Equivalence - - -
-

HDF5: API Mapping to legacy APIs

-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FunctionalitynetCDFSDAIOHDF5Comments
Open existing file for read/writencopenSDstartAIO_openH5Fopen
Creates new file for read/write.nccreate

H5FcreateSD API handles this with SDopen
Close filenccloseSDendAIO_closeH5Fclose
Redefine parametersncredef


Unnecessary under SD & HDF5 data-models
End "define" modencendef


Unnecessary under SD & HDF5 data-models
Query the number of datasets, dimensions and attributes in a filencinquireSDfileinfo
H5Dget_info
H5Rget_num_relations
H5Gget_num_contents
HDF5 interface is more granular and flexible
Update a writeable file with current changesncsync
AIO_flushH5MflushHDF5 interface is more flexible because it can be applied to parts of the -file hierarchy instead of the whole file at once. The SD interface does not -have this feature, although most of the lower HDF library supports it.
Close file access without applying recent changesncabort


How useful is this feature?
Create new dimensionncdimdefSDsetdimname
H5McreateSD interface actually creates dimensions with datasets; this just allows -naming them
Get ID of existing dimensionncdimidSDgetdimid
H5MaccessSD interface looks up dimensions by index and the netCDF interface uses -names, but they are close enough. The HDF5 interface does not currently allow -access to particular dimensions, only the dataspace as a whole.
Get size & name of dimensionncdiminqSDdiminfo
H5Mget_name
H5Sget_lrank
Only a rough match
Rename dimensionncdimrenameSDsetdimname
H5Mset_name
Create a new datasetncvardefSDcreateAIO_mkarrayH5Mcreate
Attach to an existing datasetncvaridSDselectAIO_arr_loadH5Maccess
Get basic information about a datasetncvarinqSDgetinfoAIO_arr_get_btype
AIO_arr_get_nelmts
AIO_arr_get_nbdims
AIO_arr_get_bdims
AIO_arr_get_slab
H5Dget_infoAll interfaces have different levels of information that they return; some -use of auxiliary functions is required to get an equivalent amount of information
Write a single value to a datasetncvarput1SDwritedataAIO_writeH5DwriteWhat is this useful for?
Read a single value from a datasetncvarget1SDreaddataAIO_readH5DreadWhat is this useful for?
Write a solid hyperslab of data (i.e. subset) to a datasetncvarputSDwritedataAIO_writeH5Dwrite
Read a solid hyperslab of data (i.e. subset) from a datasetncvargetSDreaddataAIO_readH5Dread
Write a general hyperslab of data (i.e. possibly subsampled) to a datasetncvarputgSDwritedataAIO_writeH5Dwrite
Read a general hyperslab of data (i.e. possibly subsampled) from a datasetncvargetgSDreaddataAIO_readH5Dread
Rename a dataset variablencvarrename

H5Mset_name
Add an attribute to a datasetncattputSDsetattr
H5Rattach_oidHDF5 requires creating a separate object to attach to a dataset, but it also -allows objects to be attributes of any other object, even nested.
Get attribute informationncattinqSDattrinfo
H5Dget_infoHDF5 has no specific function for attributes, they are treated as all other -objects in the file.
Retrieve attribute for a datasetncattgetSDreadattr
H5DreadHDF5 uses general dataset I/O for attributes.
Copy attribute from one dataset to anotherncattcopy


What is this used for?
Get name of attributencattnameSDattrinfo
H5Mget_name
Rename attributencattrename

H5Mset_name
Delete attributencattdel

H5MdeleteThis can be faked in current HDF interface with lower-level calls
Compute # of bytes to store a number-typenctypelenDFKNTsize

Hmm, the HDF5 Datatype interface needs this functionality.
Indicate that fill-values are to be written to datasetncsetfillSDsetfillmode

HDF5 Datatype interface should work on this functionality
Get information about "record" variables (Those datasets which share the -same unlimited dimensionncrecinq


This should probably be wrapped in a higher layer interface, if it's -needed for HDF5.
Get a record from each dataset sharing the unlimited dimensionncrecget


This is somewhat equivalent to reading a vdata with non-interlaced -fields, only in a dataset oriented way. This should also be wrapped in a -higher layer interface if it's necessary for HDF5.
Put a record from each dataset sharing the unlimited dimensionncrecput


This is somewhat equivalent to writing a vdata with non-interlaced -fields, only in a dataset oriented way. This should also be wrapped in a -higher layer interface if it's necessary for HDF5.
Map a dataset's name to an index to reference it with
SDnametoindex
H5Mfind_nameEquivalent functionality except HDF5 call returns an OID instead of an -index.
Get the valid range of values for data in a dataset
SDgetrange

Easily implemented with attributes at a higher level for HDF5.
Release access to a dataset
SDendaccessAIO_arr_destroyH5MreleaseOdd that the netCDF API doesn't have this...
Set the valid range of data in a dataset
SDsetrange

Easily implemented with attributes at a higher level for HDF5.
Set the label, units, format, etc. of the data values in a dataset
SDsetdatastrs

Easily implemented with attributes at a higher level for HDF5.
Get the label, units, format, etc. of the data values in a dataset
SDgetdatastrs

Easily implemented with attributes at a higher level for HDF5.
Set the label, units, format, etc. of the dimensions in a dataset
SDsetdimstrs

Easily implemented with attributes at a higher level for HDF5.
Get the label, units, format, etc. of the dimensions in a dataset
SDgetdimstrs

Easily implemented with attributes at a higher level for HDF5.
Set the scale of the dimensions in a dataset
SDsetdimscale

Easily implemented with attributes at a higher level for HDF5.
Get the scale of the dimensions in a dataset
SDgetdimscale

Easily implemented with attributes at a higher level for HDF5.
Set the calibration parameters of the data values in a dataset
SDsetcal

Easily implemented with attributes at a higher level for HDF5.
Get the calibration parameters of the data values in a dataset
SDgetcal

Easily implemented with attributes at a higher level for HDF5.
Set the fill value for the data values in a dataset
SDsetfillvalue

HDF5 needs something like this, I'm not certain where to put it.
Get the fill value for the data values in a dataset
SDgetfillvalue

HDF5 needs something like this, I'm not certain where to put it.
Move/Set the dataset to be in an 'external' file
SDsetexternalfile
H5Dset_storageHDF5 has simple functions for this, but needs an API for setting up the -storage flow.
Move/Set the dataset to be stored using only certain bits from the dataset
SDsetnbitdataset
H5Dset_storageHDF5 has simple functions for this, but needs an API for setting up the -storage flow.
Move/Set the dataset to be stored in compressed form
SDsetcompress
H5Dset_storageHDF5 has simple functions for this, but needs an API for setting up the -storage flow.
Search for a dataset attribute with a particular name
SDfindattr
H5Mfind_name
H5Mwild_search
HDF5 can handle wildcard searches for this feature.
Map a run-time dataset handle to a persistent disk reference
SDidtoref

I'm not certain this is needed for HDF5.
Map a persistent disk reference for a dataset to an index in a group
SDreftoindex

I'm not certain this is needed for HDF5.
Determine if a dataset is a 'record' variable (i.e. it has an unlimited dimension)
SDisrecord

Easily implemented by querying the dimensionality at a higher level for HDF5.
Determine if a dataset is a 'coordinate' variable (i.e. it is used as a dimension)
SDiscoord

I'm not certain this is needed for HDF5.
Set the access type (i.e. parallel or serial) for dataset I/O
SDsetaccesstype

HDF5 has functions for reading the information about this, but needs a better -API for setting up the storage flow.
Set the size of blocks used to store a dataset with unlimited dimensions
SDsetblocksize

HDF5 has functions for reading the information about this, but needs a better -API for setting up the storage flow.
Sets backward compatibility of dimensions created.
SDsetdimval_comp

Unnecessary in HDF5.
Checks backward compatibility of dimensions created.
SDisdimval_comp

Unnecessary in HDF5.
Move/Set the dataset to be stored in chunked form
SDsetchunk
H5Dset_storageHDF5 has simple functions for this, but needs an API for setting up the -storage flow.
Get the chunking information for a dataset stored in chunked form
SDgetchunkinfo
H5Dstorage_detail
Read/Write chunks of a dataset using a chunk index
SDreadchunk
SDwritechunk


I'm not certain that HDF5 needs something like this.
Tune chunk caching parameters for chunked datasets
SDsetchunkcache

HDF5 needs something like this.
Change some default behavior of the library

AIO_defaults
Something like this would be useful in HDF5, to tune I/O pipelines, etc.
Flush and close all open files

AIO_exit
Something like this might be useful in HDF5, although it could be - encapsulated with a higher-level function.
Target an architecture for data-type storage

AIO_target
There are some rough parallels with using the data-type in HDF5 to create - data-type objects which can be used to write out future datasets.
Map a filename to a file ID

AIO_filenameH5Mget_name
Get the active directory (where new datasets are created)

AIO_getcwd
HDF5 allows multiple directories (groups) to be attached to, any of which - can have new datasets created within it.
Change active directory

AIO_chdir
Since HDF5 has a slightly different access method for directories (groups), - this functionality can be wrapped around calls to H5Gget_oid_by_name.
Create directory

AIO_mkdirH5Mcreate
Return detailed information about an object

AIO_statH5Dget_info
H5Dstorage_detail
Perhaps more information should be provided through another function in - HDF5?
Get "flag" information

AIO_getflags
Not required in HDF5.
Set "flag" information

AIO_setflags
Not required in HDF5.
Get detailed information about all objects in a directory

AIO_lsH5Gget_content_info_mult
H5Dget_info
H5Dstorage_detail
Only roughly equivalent functionality in HDF5, perhaps more should be - added?
Get base type of object

AIO_BASICH5Gget_content_info
Set base type of dataset

AIO_arr_set_btypeH5Mcreate(DATATYPE)
Set dimensionality of dataset

AIO_arr_set_bdimsH5Mcreate(DATASPACE)
Set slab of dataset to write

AIO_arr_set_slab
This is similar to the process of creating a dataspace for use when - performing I/O on an HDF5 dataset
Describe chunking of dataset to write

AIO_arr_set_chunkH5Dset_storage
Describe array index permutation of dataset to write

AIO_arr_set_permH5Dset_storage
Create a new dataset with dataspace and datatype information from an - existing dataset.

AIO_arr_copy
This can be mimicked in HDF5 by attaching to the datatype and dataspace of -an existing dataset and using the IDs to create new datasets.
Create a new directory to group objects within

AIO_mkgroupH5Mcreate(GROUP)
Read name of objects in directory

AIO_read_groupH5Gget_content_info_mult
Add objects to directory

AIO_write_groupH5Ginsert_item_mult
Combine an architecture and numeric type to derive the format's datatype

AIO_COMBINE
This is a nice feature to add to HDF5.
Derive an architecture from the format's datatype

AIO_ARCH
This is a nice feature to add to HDF5.
Derive a numeric type from the format's datatype

AIO_PNT
This is a nice feature to add to HDF5.
Register error handling function for library to call when errors occur

AIO_error_handler
This should be added to HDF5.
- diff --git a/doc/html/H5.format.html b/doc/html/H5.format.html deleted file mode 100644 index 8d2ba87..0000000 --- a/doc/html/H5.format.html +++ /dev/null @@ -1,5956 +0,0 @@ - - - - HDF5 File Format Specification - - - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
-

HDF5 File Format Specification

- -
- - - -
-
    -
  1. Introduction -
  2. Disk Format Level 0 - File Metadata - -
      -
    1. Disk Format Level 0A - File Signature and Super Block -
    2. Disk Format Level 0B - File Driver Info -
    -
    -
  3. Disk Format Level 1 - File Infrastructure - -
      -
    1. Disk Format Level 1A - B-link Trees and B-tree Nodes -
    2. Disk Format Level 1B - Group -
    3. Disk Format Level 1C - Group Entry -
    4. Disk Format Level 1D - Local Heaps -
    5. Disk Format Level 1E - Global Heap -
    6. Disk Format Level 1F - Free-space Index -
    -
    -
  4. Disk Format Level 2 - Data Objects - -
      -
    1. Disk Format Level 2a - Data Object Headers -
        -
      1. Name: NIL -
      2. Name: Simple Dataspace - -
      3. Name: Reserved - not assigned yet -
      4. Name: Datatype -
      5. Name: Data Storage - Fill Value (Old) -
      6. Name: Data Storage - Fill Value -
      -
    -
    -
-
   -
    - -
  1. Disk Format Level 2 - Data Objects - (Continued) -
      -
    1. Disk Format Level 2a - Data Object Headers(Continued) -
        - -
      1. Name: Reserved - not assigned yet -
      2. Name: Data Storage - External Data Files -
      3. Name: Data Storage - Layout -
      4. Name: Reserved - not assigned yet -
      5. Name: Reserved - not assigned yet -
      6. Name: Data Storage - Filter Pipeline -
      7. Name: Attribute -
      8. Name: Object Comment -
      9. Name: Object Modification Date and Time (Old) -
      10. Name: Shared Object Message -
      11. Name: Object Header Continuation -
      12. Name: Group Message -
      13. Name: Object Modification Date and Time -
      -
    2. Disk Format: Level 2b - Shared Data Object Headers -
    3. Disk Format: Level 2c - Data Object Data Storage -
    -
    -
  2. Appendix -
-
-
- -
-
- - -

Introduction

- - - - - - - -
  -
- HDF5 Groups -
 
  - Figure 1: Relationships among the HDF5 root group, other groups, and objects -
-
 
  - HDF5 Objects -  
  - Figure 2: HDF5 objects -- datasets, datatypes, or dataspaces -
-
 
- - -

The format of an HDF5 file on disk encompasses several - key ideas of the HDF4 and AIO file formats, while also - addressing some of their shortcomings. The new format is - more self-describing than the HDF4 format and is more - uniformly applied to data objects in the file. - -

An HDF5 file appears to the user as a directed graph. - The nodes of this graph are the higher-level HDF5 objects - that are exposed by the HDF5 APIs: - -

    -
  • Groups -
  • Datasets -
  • Named datatypes -
- -

At the lowest level, as information is actually written to the disk, - an HDF5 file is made up of the following objects: -

    -
  • A super block -
  • B-tree nodes (containing either symbol nodes or raw data chunks) -
  • Object headers -
  • A global heap -
  • Local heaps -
  • Free space -
- -

The HDF5 library uses these low-level objects to represent the - higher-level objects that are then presented to the user or - to applications through the APIs. - For instance, a group is an object header that contains a message that - points to a local heap and to a B-tree which points to symbol nodes. - A dataset is an object header that contains messages that describe - datatype, space, layout, filters, external files, fill value, etc - with the layout message pointing to either a raw data chunk or to a - B-tree that points to raw data chunks. - - -

This Document

- -

This document describes the lower-level data objects; - the higher-level objects and their properties are described - in the HDF5 User's Guide. - -

Three levels of information comprise the file format. - Level 0 contains basic information for identifying and - defining information about the file. Level 1 contains - the information about the pieces of a file shared by many objects - in the file (such as B-trees and heaps). Level 2 is the rest - of the file and contains all of the data objects, with each object - partitioned into header information, also known as - metadata, and data. - -

The sizes of various fields in the following layout tables are - determined by looking at the number of columns the field spans - in the table. There are three exceptions: (1) The size may be - overridden by specifying a size in parentheses, (2) the size of - addresses is determined by the Size of Offsets field - in the super block and is indicated in this document with a - superscripted 'O', and (3) the size of length fields is determined - by the Size of Lengths field in the super block and is - indicated in this document with a superscripted 'L'. - -

Values for all fields in this document should be treated as unsigned - integers, unless otherwise noted in the description of a field. - Additionally, all metadata fields are stored in little-endian byte - order. -

- -
-
- -

- Disk Format: Level 0 - File Metadata

- -

- Disk Format: Level 0A - File Signature and Super Block

- -

The super block may begin at certain predefined offsets within - the HDF5 file, allowing a block of unspecified content for - users to place additional information at the beginning (and - end) of the HDF5 file without limiting the HDF5 library's - ability to manage the objects within the file itself. This - feature was designed to accommodate wrapping an HDF5 file in - another file format or adding descriptive information to the - file without requiring the modification of the actual file's - information. The super block is located by searching for the - HDF5 file signature at byte offset 0, byte offset 512, and at - successive locations in the file, each double the - previous location, i.e. 0, 512, 1024, 2048, etc. - -

The super block is composed of a file signature, followed by - super block and group version numbers, information - about the sizes of offset and length values used to describe - items within the file, the size of each group page, - and a group entry for the root object in the file. - -
-
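The search procedure just described can be illustrated with a small, hedged sketch; this is ordinary stdio code, not an HDF5 library routine, and it probes offsets 0, 512, 1024, 2048, ... for the 8-byte file signature given in the layout description below:

#include <stdio.h>
#include <string.h>

/* Return the byte offset of the super block, or -1 if the signature is
 * not found before the end of the file. */
static long find_superblock(FILE *fp)
{
    const unsigned char sig[8] = { 0x89, 'H', 'D', 'F', '\r', '\n', 0x1a, '\n' };
    unsigned char buf[8];
    long offset = 0;

    for (;;) {
        if (fseek(fp, offset, SEEK_SET) != 0 || fread(buf, 1, 8, fp) != 8)
            return -1;                          /* ran past end of file */
        if (memcmp(buf, sig, 8) == 0)
            return offset;                      /* signature found here */
        offset = (offset == 0) ? 512 : offset * 2;
    }
}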

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- HDF5 Super Block Layout -
bytebytebytebyte

HDF5 File Signature (8 bytes)

Version # of Super BlockVersion # of Global Free-space StorageVersion # of Root Group Symbol Table EntryReserved (zero)
Version # of Shared Header Message FormatSize of OffsetsSize of LengthsReserved (zero)
Group Leaf Node KGroup Internal Node K
File Consistency Flags
Indexed Storage Internal Node K1Reserved (zero)1
Base AddressO
Address of Global Free-space HeapO
End of File AddressO
Driver Information Block AddressO
Root Group Symbol Table Entry
- - - - -
- (Items marked with an 'O' in the above table are -
- of the size specified in "Size of Offsets.") -
- (Items marked with a '1' in the above table are -
- new in version 1 of the superblock.) -
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
HDF5 File Signature -

This field contains a constant value and can be used to - quickly identify a file as being an HDF5 file. The - constant value is designed to allow easy identification of - an HDF5 file and to allow certain types of data corruption - to be detected. The file signature of an HDF5 file always - contains the following values: -

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Decimal:13772687013102610
Hexadecimal:894844460d0a1a0a
ASCII C Notation:\211HDF\r\n\032\n
-
-
- -

This signature both identifies the file as an HDF5 file - and provides for immediate detection of common - file-transfer problems. The first two bytes distinguish - HDF5 files on systems that expect the first two bytes to - identify the file type uniquely. The first byte is - chosen as a non-ASCII value to reduce the probability - that a text file may be misrecognized as an HDF5 file; - also, it catches bad file transfers that clear bit - 7. Bytes two through four name the format. The CR-LF - sequence catches bad file transfers that alter newline - sequences. The control-Z character stops file display - under MS-DOS. The final line feed checks for the inverse - of the CR-LF translation problem. (This is a direct - descendent of the PNG file - signature.) -

- -

This field is present in version 0+ of the superblock. -

-
Version Number of the Super Block -

This value is used to determine the format of the - information in the super block. When the format of the - information in the super block is changed, the version number - is incremented to the next integer and can be used to - determine how the information in the super block is - formatted. -

- -

Values of 0 and 1 are defined for this field. -

- -

This field is present in version 0+ of the superblock. -

-
Version Number of the File Free-space Information -

This value is used to determine the format of the - information in the File Free-space Information. -

-

The only value currently valid in this field is '0', which - indicates that the free space index is formatted as described - below. -

- -

This field is present in version 0+ of the superblock. -

-
Version Number of the Root Group Symbol Table Entry -

This value is used to determine the format of the - information in the Root Group Symbol Table Entry. When the - format of the information in that field is changed, the - version number is incremented to the next integer and can be - used to determine how the information in the field - is formatted. -

-

The only value currently valid in this field is '0', which - indicates that the root group symbol table entry is formatted as - described below. -

- -

This field is present in version 0+ of the superblock. -

-
Version Number of the Shared Header Message Format -

This value is used to determine the format of the - information in a shared object header message, which is - stored in the global small-data heap. Since the format - of the shared header messages differs from the private - header messages, a version number is used to identify changes - in the format. -

-

The only value currently valid in this field is '0', which - indicates that shared header messages are formatted as - described below. -

- -

This field is present in version 0+ of the superblock. -

-
Size of Offsets -

This value contains the number of bytes used to store - addresses in the file. The values for the addresses of - objects in the file are offsets relative to a base address, - usually the address of the super block signature. This - allows a wrapper to be added after the file is created - without invalidating the internal offset locations. -

- -

This field is present in version 0+ of the superblock. -

-
Size of Lengths -

This value contains the number of bytes used to store - the size of an object. -

- -

This field is present in version 0+ of the superblock. -

-
Group Leaf Node K -

Each leaf node of a group B-tree will have at - least this many entries but not more than twice this - many. If a group has a single leaf node then it - may have fewer entries. -

-

This value must be greater than zero. -

-

See the description of B-trees below. -

- -

This field is present in version 0+ of the superblock. -

-
Group Internal Node K -

Each internal node of a group B-tree will have at - least this many entries but not more than twice this - many. If the group has only one internal - node then it might have fewer entries. -

-

This value must be greater than zero. -

-

See the description of B-trees below. -

- -

This field is present in version 0+ of the superblock. -

-
File Consistency Flags -

This value contains flags to indicate information - about the consistency of the information contained - within the file. Currently, the following bit flags are - defined: -

    -
  • Bit 0 set indicates that the file is opened for - write-access. -
  • Bit 1 set indicates that the file has - been verified for consistency and is guaranteed to be - consistent with the format defined in this document. -
  • Bits 2-31 are reserved for future use. -
- Bit 0 should be - set as the first action when a file is opened for write - access and should be cleared only as the final action - when closing a file. Bit 1 should be cleared during - normal access to a file and only set after the file's - consistency is guaranteed by the library or a - consistency utility. -
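As a small illustration (not library code; the macro and function names below are made up for this sketch), the flag bits described above can be tested with ordinary bit operations once the four-byte flag word has been read from the super block:

    /* Hypothetical helpers for the File Consistency Flags word. */
    #define SB_FLAG_WRITE_ACCESS  0x00000001UL   /* bit 0 */
    #define SB_FLAG_VERIFIED      0x00000002UL   /* bit 1 */

    int sb_open_for_write(unsigned long flags)
    {
        return (flags & SB_FLAG_WRITE_ACCESS) != 0;
    }

    int sb_verified_consistent(unsigned long flags)
    {
        return (flags & SB_FLAG_VERIFIED) != 0;
    }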

- -

This field is present in version 0+ of the superblock. -

-
Indexed Storage Internal Node K -

Each internal node of an indexed storage B-tree will have at least this many entries but not more than twice this many. If the B-tree has only one internal node then it might have fewer entries.

-

This value must be greater than zero. -

-

See the description of B-trees below. -

- -

This field is present in version 1+ of the superblock. -

-
Base Address -

This is the absolute file address of the first byte of - the HDF5 data within the file. The library currently - constrains this value to be the absolute file address - of the super block itself when creating new files; - future versions of the library may provide greater - flexibility. When opening an existing file and this address does - not match the offset of the superblock, the library assumes - that the entire contents of the HDF5 file have been adjusted in - the file and adjusts the base address and end of file address to - reflect their new positions in the file. Unless otherwise noted, - all other file addresses are relative to this base - address. -

- -

This field is present in version 0+ of the superblock. -

-
Address of Global Free-space Index -

Free-space management is not yet defined in the HDF5 - file format and is not handled by the library. - Currently this field always contains the - undefined address. -

- -

This field is present in version 0+ of the superblock. -

-
End of File Address -

This is the absolute file address of the first byte past the end of all HDF5 data. It is used to determine whether a file has been accidentally truncated and as an address where file data allocation can occur if space from the free list is not used.

- -

This field is present in version 0+ of the superblock. -

-
Driver Information Block Address -

This is the relative file address of the file driver - information block which contains driver-specific - information needed to reopen the file. If there is no - driver information block then this entry should be the - undefined address. -

- -

This field is present in version 0+ of the superblock. -

-
Root Group Symbol Table Entry -

This is the symbol table entry - of the root group, which serves as the entry point into - the group graph for the file. -

- -

This field is present in version 0+ of the superblock. -

-
-
- -

- Disk Format: Level 0B - File Driver Info

- -

The file driver information block is an optional region of the - file which contains information needed by the file driver in - order to reopen a file. The format of the file driver information - block is: - -
-

- Driver Information Block -
bytebytebytebyte
VersionReserved (zero)
Driver Information Size (4 bytes)

Driver Identification (8 bytes)



Driver Information (n bytes)


-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Version -

The version number of the driver information block. The - file format documented here is version zero. -

-
Driver Information Size -

The size in bytes of the Driver Information part of this - structure. -

-
Driver Identification -

This is an eight-byte ASCII string without null - termination which identifies the driver and version number - of the Driver Information block. The predefined drivers - supplied with the HDF5 library are identified by the - letters NCSA followed by the first four characters of - the driver name. If the Driver Information block is not - the original version then the last letter(s) of the - identification will be replaced by a version number in - ASCII. -

-

- For example, the various versions of the family driver - will be identified by NCSAfami, NCSAfam0, - NCSAfam1, etc. - (NCSAfami is simply NCSAfamily truncated - to eight characters. Subsequent identifiers will be created by - substituting sequential numerical values for the final character, - starting with zero.) -

-

- Identification for user-defined drivers - is arbitrary but should be unique and avoid the four character - prefix "NCSA". -

-
Driver InformationDriver information is stored in a format defined by the - file driver and encoded/decoded by the driver callbacks - invoked from the H5FD_sb_encode and - H5FD_sb_decode functions.
-
- -
-
- -

- Disk Format: Level 1 - File Infrastructure

-

Disk Format: Level 1A - B-link Trees and B-tree Nodes

- -

B-link trees allow flexible storage for objects which tend to grow - in ways that cause the object to be stored discontiguously. B-trees - are described in various algorithms books including "Introduction to - Algorithms" by Thomas H. Cormen, Charles E. Leiserson, and Ronald - L. Rivest. The B-link tree, in which the sibling nodes at a - particular level in the tree are stored in a doubly-linked list, - is described in the "Efficient Locking for Concurrent Operations - on B-trees" paper by Phillip Lehman and S. Bing Yao as published - in the ACM Transactions on Database Systems, Vol. 6, - No. 4, December 1981. - -

The B-link trees implemented by the file format contain one more - key than the number of children. In other words, each child - pointer out of a B-tree node has a left key and a right key. - The pointers out of internal nodes point to sub-trees while - the pointers out of leaf nodes point to symbol nodes and - raw data chunks. - Aside from that difference, internal nodes and leaf nodes - are identical. - -
-

- B-tree Nodes -
bytebytebytebyte
Signature
Node TypeNode LevelEntries Used
Address of Left SiblingO
Address of Right SiblingO
Key 0 (variable size)
Address of Child 0O
Key 1 (variable size)
Address of Child 1O
...
Key 2K (variable size)
Address of Child 2KO
Key 2K+1 (variable size)
- - - -
(Items marked with an 'O' in the above table are of the size specified in "Size of Offsets.")
-
- -
-
Field NameDescription
Signature -

The ASCII character string "TREE" is - used to indicate the - beginning of a B-link tree node. This gives file - consistency checking utilities a better chance of - reconstructing a damaged file. -

-
Node Type -

Each B-link tree points to a particular type of data. - This field indicates the type of data as well as - implying the maximum degree K of the tree and - the size of each Key field. -

- - - - - - - - - - - - - - -
Node TypeDescription
0This tree points to group nodes.
1This tree points to raw data chunk nodes.
-
Node Level -

The node level indicates the level at which this node appears in the tree (leaf nodes are at level zero). Not only does the level indicate whether child pointers point to sub-trees or to data, but it can also be used to help file consistency checking utilities reconstruct damaged trees.

-
Entries Used -

This determines the number of children to which this node points. All nodes of a particular type of tree have the same maximum degree, but most nodes will point to fewer than that number of children. The valid child pointers and keys appear at the beginning of the node and the unused pointers and keys appear at the end of the node. The unused pointers and keys have undefined values.

-
Address of Left Sibling -

This is the relative file address of the left sibling of - the current node. If the current - node is the left-most node at this level then this field - is the undefined address. -

-
Address of Right Sibling -

This is the relative file address of the right sibling of - the current node. If the current - node is the right-most node at this level then this - field is the undefined address. -

-
Keys and Child Pointers -

Each tree has 2K+1 keys with 2K - child pointers interleaved between the keys. The number - of keys and child pointers actually containing valid - values is determined by the node's Entries Used field. - If that field is N then the B-link tree contains - N child pointers and N+1 keys. -

-
Key -

The format and size of the key values is determined by - the type of data to which this tree points. The keys are - ordered and are boundaries for the contents of the child - pointer; that is, the key values represented by child - N fall between Key N and Key - N+1. Whether the interval is open or closed on - each end is determined by the type of data to which the - tree points. -

- -

- The format of the key depends on the node type. - For nodes of node type 0 (group nodes), the key is formatted as - follows: -

- - - - - -
A single field of Size of Lengths - bytes:Indicates the byte offset into the local heap - for the first object name in the subtree which - that key describes. -
-
-

- -

- For nodes of node type 1 (chunked raw data nodes), the key is - formatted as follows: -

- - - - - - - - - - - - - -
Bytes 1-4:Size of chunk in bytes.
Bytes 5-8:Filter mask, a 32-bit bit field indicating which filters have been skipped for this chunk. Each filter has an index number in the pipeline (starting at 0, with the first filter to apply) and if that filter is skipped, the bit corresponding to its index is set.
N 64-bit fields:A 64-bit index indicating the offset of the - chunk within the dataset where N is the number - of dimensions of the dataset. For example, if - a chunk in a 3-dimensional dataset begins at the - position [5,5,5], there will be three - such 64-bit indices, each with the value of - 5.
-
-

-
Child Pointer -

The tree node contains file addresses of subtrees or - data depending on the node level. Nodes at Level 0 point - to data addresses, either raw data chunk or group nodes. - Nodes at non-zero levels point to other nodes of the - same B-tree. -

-

For raw data chunk nodes, the child pointer is the address - of a single raw data chunk. For group nodes, the child pointer - points to a symbol table, which contains - information for multiple symbol table entries. -

-
-
- -

- Conceptually, each B-tree node looks like this: -

- - - - - - - - - - - - - -
key[0] child[0] key[1] child[1] key[2] ... ... key[N-1] child[N-1] key[N]
-
-
- - where child[i] is a pointer to a sub-tree (at a level - above Level 0) or to data (at Level 0). - Each key[i] describes an item stored by the B-tree - (a chunk or an object of a group node). The range of values - represented by child[i] is indicated by key[i] - and key[i+1]. - - -

The following question must next be answered: - "Is the value described by key[i] contained in - child[i-1] or in child[i]?" - The answer depends on the type of tree. - In trees for groups (node type 0) the object described by - key[i] is the greatest object contained in - child[i-1] while in chunk trees (node type 1) the - chunk described by key[i] is the least chunk in - child[i]. - -

That means that key[0] for group trees is sometimes unused; - it points to offset zero in the heap, which is always the - empty string and compares as "less-than" any valid object name. - -

And key[N] for chunk trees is sometimes unused; - it contains a chunk offset which compares as "greater-than" - any other chunk offset and has a chunk byte size of zero - to indicate that it is not actually allocated. - - -
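To make these containment rules concrete, the following C sketch (not library code; the function name is hypothetical) picks the child of a group-tree node to descend into when looking up a name. It assumes the node's keys have already been resolved from their local-heap offsets into an array of entries_used + 1 strings.

    #include <string.h>

    /* For a group-node tree, keys[i] is the greatest name stored in
       child[i-1], so child[i] covers the names in (keys[i], keys[i+1]].
       keys[] must hold n_children + 1 resolved key strings.            */
    int choose_group_child(const char *keys[], int n_children,
                           const char *name)
    {
        int i;

        for (i = 0; i < n_children; i++) {
            if (strcmp(name, keys[i + 1]) <= 0)
                return i;        /* keys[i] < name <= keys[i+1] */
        }
        return -1;               /* name is greater than key[N]:
                                    not in this subtree          */
    }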

Disk Format: Level 1B - Group and Symbol Nodes

- -

A group is an object internal to the file that allows - arbitrary nesting of objects within the file (including other groups). - A group maps a set of names in the group to a set of relative - file addresses where objects with those names are located in - the file. Certain metadata for an object to which the group points - can be cached in the group's symbol table in addition to the - object's header. - -

An HDF5 object name space can be stored hierarchically by - partitioning the name into components and storing each - component in a group. The group entry for a - non-ultimate component points to the group containing - the next component. The group entry for the last - component points to the object being named. - -

A group is a collection of group nodes pointed - to by a B-link tree. Each group node contains entries - for one or more symbols. If an attempt is made to add a - symbol to an already full group node containing - 2K entries, then the node is split and one node - contains K symbols and the other contains - K+1 symbols. - -
-

- - - - - - - - - - - - - - - - - - - -
- Group Node (A Leaf of a B-tree) -
bytebytebytebyte
Signature
Version NumberReserved (0)Number of Symbols


Group Entries


-
- -
-
Field NameDescription
Signature -

The ASCII character string "SNOD" is - used to indicate the - beginning of a group node. This gives file - consistency checking utilities a better chance of - reconstructing a damaged file. -

-
Version Number -

The version number for the group node. This - document describes version 1. (There is no version '0' - of the group node) -

-
Number of Symbols -

Although all group nodes have the same length, - most contain fewer than the maximum possible number of - symbol entries. This field indicates how many entries - contain valid data. The valid entries are packed at the - beginning of the group node while the remaining - entries contain undefined values. -

-
Group Entries -

Each symbol has an entry in the group node. - The format of the entry is described below. - There are 2K entries in each group node, where - K is the "Group Leaf Node K" value from the - super block. -

-
-
- -

- Disk Format: Level 1C - Group Entry

- -

Each group entry in a group node is designed - to allow for very fast browsing of stored objects. - Toward that design goal, the group entries - include space for caching certain constant metadata from the - object header. - -
-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Group Entry -
bytebytebytebyte
Name OffsetO
Object Header AddressO
Cache Type
Reserved


Scratch-pad Space (16 bytes)


- - - -
(Items marked with an 'O' in the above table are of the size specified in "Size of Offsets.")
-
- -
-
Field NameDescription
Name Offset -

This is the byte offset into the group local - heap for the name of the object. The name is null - terminated. -

-
Object Header Address -

Every object has an object header which serves as a - permanent location for the object's metadata. In addition - to appearing in the object header, some metadata can be - cached in the scratch-pad space. -

-
Cache Type -

The cache type is determined from the object header. - It also determines the format for the scratch-pad space: -
- - - - - - - - - - - - - - - - - - - - - -
Type:Description:
0No data is cached by the group entry. This - is guaranteed to be the case when an object header - has a link count greater than one. -
1Object header metadata is cached in the group - entry. This implies that the group - entry refers to another group. -
2The entry is a symbolic link. The first four bytes - of the scratch-pad space are the offset into the local - heap for the link value. The object header address - will be undefined. -
NOther cache values can be defined later and - libraries that do not understand the new values will - still work properly. -
-

-
Reserved -

These four bytes are present so that the scratch-pad - space is aligned on an eight-byte boundary. They are - always set to zero. -

-
Scratch-pad Space -

This space is used for different purposes, depending - on the value of the Cache Type field. Any metadata - about a dataset object represented in the scratch-pad - space is duplicated in the object header for that - dataset. This metadata can include the datatype - and the size of the dataspace for a dataset whose datatype - is atomic and whose dataspace is fixed and less than - four dimensions. -

-

- Furthermore, no data is cached in the group - entry scratch-pad space if the object header for - the group entry has a link count greater than - one. -

-
-
- -

Format of the Scratch-pad Space

- -

The group entry scratch-pad space is formatted - according to the value in the Cache Type field. - -

If the Cache Type field contains the value zero - (0) then no information is - stored in the scratch-pad space. - -

If the Cache Type field contains the value one - (1), then the scratch-pad space - contains cached metadata for another object header - in the following format: - -
-

- - - - - - - - - - - - - - -
- Object Header Scratch-pad Format -
bytebytebytebyte
Address of B-treeO
Address of Name HeapO
- - - -
(Items marked with an 'O' in the above table are of the size specified in "Size of Offsets.")
-
- -
-
- - - - - - - - - - - - - - - -
Field NameDescription
Address of B-tree -

This is the file address for the root of the - group's B-tree. -

-
Address of Name Heap -

This is the file address for the group's local - heap, in which are stored the group's symbol names. -

-
-
- - -

If the Cache Type field contains the value two - (2), then the scratch-pad space - contains cached metadata for another symbolic link - in the following format: - -
-

- - - - - - - - - - - - - -
- Symbolic Link Scratch-pad Format -
bytebytebytebyte
Offset to Link Value
-
- -
-
- - - - - - - - - - -
Field NameDescription
Offset to Link Value -

The value of a symbolic link (that is, the name of the - thing to which it points) is stored in the local heap. - This field is the 4-byte offset into the local heap for - the start of the link value, which is null terminated. -

-
-
- -

Disk Format: Level 1D - Local Heaps

- -

A heap is a collection of small heap objects. Objects can be - inserted and removed from the heap at any time. - The address of a heap does not change once the heap is created. - References to objects are stored in the group table; - the names of those objects are stored in the local heap. -

- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Local Heap -
bytebytebytebyte
Signature
VersionReserved (zero)
Data Segment SizeL
Offset to Head of Free-listL
Address of Data SegmentO
- - - - -
(Items marked with an 'L' in the above table are of the size specified in "Size of Lengths.")
(Items marked with an 'O' in the above table are of the size specified in "Size of Offsets.")
-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Signature -

The ASCII character string "HEAP" - is used to indicate the - beginning of a heap. This gives file consistency - checking utilities a better chance of reconstructing a - damaged file. -

-
Version -

Each local heap has its own version number so that new - heaps can be added to old files. This document - describes version zero (0) of the local heap. -

-
Data Segment Size -

The total amount of disk memory allocated for the heap - data. This may be larger than the amount of space - required by the objects stored in the heap. The extra - unused space in the heap holds a linked list of free blocks. -

-
Offset to Head of Free-list -

This is the offset within the heap data segment of the first free block (or the undefined address if there is no free block). The free block contains "Size of Lengths" bytes that are the offset of the next free block (or the value '1' if this is the last free block) followed by "Size of Lengths" bytes that store the size of this free block. The size of the free block includes the space used to store the offset of the next free block and the size of the current block, making the minimum size of a free block 2 * "Size of Lengths". (A sketch of walking this list appears after this table.)

-
Address of Data Segment -

The data segment originally starts immediately after - the heap header, but if the data segment must grow as a - result of adding more objects, then the data segment may - be relocated, in its entirety, to another part of the - file. -

-
-
- -

Objects within the heap should be aligned on an 8-byte boundary. - -
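As an illustration of the free-list layout described above, this C sketch (not library code; all names are hypothetical) walks the free blocks of a local heap data segment that has already been read into memory. It assumes an 8-byte "Size of Lengths", little-endian encoding, and that the caller has already checked the head offset against the undefined address.

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Read an 8-byte little-endian length/offset from the heap image. */
    static uint64_t get_len8(const unsigned char *p)
    {
        uint64_t v = 0;
        int      i;

        for (i = 7; i >= 0; i--)
            v = (v << 8) | p[i];
        return v;
    }

    /* Walk the free list of a local heap data segment.  'head' is the
       "Offset to Head of Free-list" value; the offset value 1 ends the
       list.  Each free block stores the offset of the next free block
       followed by its own size.                                        */
    void print_free_blocks(const unsigned char *segment, size_t seg_size,
                           uint64_t head)
    {
        uint64_t offset = head;

        while (offset != 1 && offset < seg_size && seg_size - offset >= 16) {
            uint64_t next = get_len8(segment + offset);
            uint64_t size = get_len8(segment + offset + 8);

            printf("free block at offset %llu, %llu bytes\n",
                   (unsigned long long)offset, (unsigned long long)size);
            offset = next;
        }
    }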

Disk Format: Level 1E - Global Heap

- -

Each HDF5 file has a global heap which stores various types of - information which is typically shared between datasets. The - global heap was designed to satisfy these goals: - -

    -
  1. Repeated access to a heap object must be efficient without - resulting in repeated file I/O requests. Since global heap - objects will typically be shared among several datasets, it is - probable that the object will be accessed repeatedly. -
  2. Collections of related global heap objects should result in - fewer and larger I/O requests. For instance, a dataset of - object references will have a global heap object for each - reference. Reading the entire set of object references - should result in a few large I/O requests instead of one small - I/O request for each reference. -
  3. It should be possible to remove objects from the global heap - and the resulting file hole should be eligible to be reclaimed - for other uses. -
-

- -

The implementation of the heap makes use of the memory management already available at the file level and combines that with a new top-level object called a collection to achieve goal 2 above. The global heap is the set of all collections. Each global heap object belongs to exactly one collection and each collection contains one or more global heap objects. For the purposes of disk I/O and caching, a collection is treated as an atomic object.

- -

The HDF5 library creates global heap collections as needed, so there may be multiple collections throughout the file. The set of all of them is abstractly called the "global heap", although they don't actually link to each other, and there is no global place in the file where you can discover all of the collections. The collections are found simply by finding a reference to one through another object in the file (e.g., variable-length datatype elements).

- -
-
- A Global Heap Collection -
bytebytebytebyte
Signature
VersionReserved (zero)
Collection SizeL

Global Heap Object 1


Global Heap Object 2


...


Global Heap Object N


Global Heap Object 0 (free space)

- - - -
(Items marked with an 'L' in the above table are of the size specified in "Size of Lengths.")
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Signature -

The ASCII character string "GCOL" - is used to indicate the - beginning of a collection. This gives file consistency - checking utilities a better chance of reconstructing a - damaged file. -

-
Version -

Each collection has its own version number so that new - collections can be added to old files. This document - describes version one (1) of the collections (there is no - version zero (0)). -

-
Collection Size -

This is the size in bytes of the entire collection - including this field. The default (and minimum) - collection size is 4096 bytes which is a typical file - system block size. This allows for 127 16-byte heap - objects plus their overhead (the collection header of 16 bytes - and the 16 bytes of information about each heap object). -

-
Global Heap Object 1 through N -

The objects are stored in any order with no - intervening unused space. -

-
Global Heap Object 0 -

Global Heap Object 0 (zero), when present, represents the free - space in the collection. Free space always appears at the end of - the collection. If the free space is too small to store the header - for Object 0 (described below) then the header is implied and the - collection contains no free space. -

-
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- Global Heap Object -
bytebytebytebyte
Heap Object IDReference Count
Reserved
Object SizeL

Object Data

- - - -
(Items marked with an 'L' in the above table are of the size specified in "Size of Lengths.")
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Heap Object ID -

Each object has a unique identification number within a - collection. The identification numbers are chosen so that - new objects have the smallest value possible with the - exception that the identifier 0 always refers to the - object which represents all free space within the - collection. -

-
Reference Count -

All heap objects have a reference count field. An - object which is referenced from some other part of the - file will have a positive reference count. The reference - count for Object 0 is always zero. -

-
Reserved -

Zero padding to align next field on an 8-byte boundary. -

-
Object Size -

This is the size of the object data stored for the object. - The actual storage space allocated for the object data is rounded - up to a multiple of eight. -

-
Object Data -

The object data is treated as a one-dimensional array - of bytes to be interpreted by the caller. -

-
-
- -
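For illustration, the space one global heap object occupies in its collection follows directly from the rules above: a 16-byte object header plus the object data rounded up to a multiple of eight bytes. A minimal C sketch (not library code; the function name is made up):

    #include <stddef.h>

    /* Bytes occupied in a collection by one global heap object:
       the 16-byte object header plus the object data rounded up
       to a multiple of eight bytes.                              */
    size_t gheap_object_space(size_t object_size)
    {
        return 16 + ((object_size + 7) & ~(size_t)7);
    }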

Disk Format: Level 1F - Free-space Index

- -

The free-space index is a collection of blocks of data, - dispersed throughout the file, which are currently not used by - any file objects. - -

The super block contains a pointer to root of the free-space description; - that pointer is currently required to be the - undefined address. - -

The format of the free-space index is not defined at this time. - - - -
-


- -

Disk Format: Level 2 - Data Objects

- -

Data objects contain the real information in the file. These - objects compose the scientific data and other information which - are generally thought of as "data" by the end-user. All the - other information in the file is provided as a framework for - these data objects. -

- -

A data object is composed of header information and data - information. The header information contains the information - needed to interpret the data information for the data object as - well as additional "metadata" or pointers to additional - "metadata" used to describe or annotate each data object. -

- -

- Disk Format: Level 2A - Data Object Headers

- -

The header information of an object is designed to encompass - all the information about an object, except for the data itself. - This information includes - the dataspace, datatype, information about how the data - is stored on disk (in external files, compressed, broken up in - blocks, etc.), as well as other information used by the library - to speed up access to the data objects or maintain a file's - integrity. Information stored by user applications as attributes - is also stored in the object's header. The header of each object is - not necessarily located immediately prior to the object's data in the - file and in fact may be located in any position in the file. The order - of the messages in an object header is not significant. -

- -

Header messages are aligned on 8-byte boundaries. -

- -
-
- Object Headers -
bytebytebytebyte
VersionReserved (zero)Number of Header Messages
Object Reference Count
Object Header Size
Header Message Type #1Size of Header Message Data #1
Header Message #1 FlagsReserved (zero)

Header Message Data #1

.
.
.
Header Message Type #nSize of Header Message Data #n
Header Message #n FlagsReserved (zero)

Header Message Data #n

-
- -
-
Field NameDescription
Version -

This value is used to determine the format of the - information in the object header. When the format of the - information in the object header is changed, the version number - is incremented and can be used to determine how the - information in the object header is formatted. This - document describes version one (1) (there was no version - zero (0)). -

-
Number of Header Messages -

This value determines the number of messages listed in - object headers for this object. This value includes the messages - in continuation messages for this object. -

-
Object Reference Count -

This value specifies the number of "hard links" to this object - within the current file. References to the object from external - files, "soft links" in this file and object references in this - file are not tracked. -

-
Object Header Size -

This value specifies the number of bytes of header message data - following this length field that contain object header messages - for this object header. This value does not include the size of - object header continuation blocks for this object elsewhere in the - file. -

-
Header Message Type -

This value specifies the type of information included in the - following header message data. The header message types for the - pre-defined header messages are included in sections below. -

-
Size of Header Message Data -

This value specifies the number of bytes of header - message data following the header message type and length - information for the current message. The size includes - padding bytes to make the message a multiple of eight - bytes. -

-
Header Message Flags -

This is a bit field with the following definition: - - - - - - - - - - - - - - - - - - -
BitDescription
0If set, the message data is constant. This is used - for messages like the datatype message of a dataset. -
1If set, the message is stored in the global heap. - The Header Message Data field contains a Shared Object - message and the Size of Header Message Data field - contains the size of that Shared Object message. -
2-7Reserved
-

-
Header Message Data -

The format and length of this field is determined by the - header message type and size respectively. Some header - message types do not require any data and this information - can be eliminated by setting the length of the message to - zero. The data is padded with enough zeros to make the - size a multiple of eight. -

-
-
- -
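As a concrete illustration of the message layout above, the following C sketch (not library code; the names are hypothetical) decodes the fixed eight-byte prefix of one header message from a buffer holding the object header, assuming little-endian encoding of the two-byte fields:

    #include <stdint.h>

    /* Fixed prefix of an object header message as laid out above:
       2-byte type, 2-byte size of message data, 1-byte flags and
       3 reserved bytes, followed by 'size' bytes of message data. */
    struct msg_prefix {
        uint16_t type;
        uint16_t size;     /* message data size, padded to 8 bytes */
        uint8_t  flags;
    };

    /* Decode the prefix at 'p' and return a pointer to the start
       of the message data.                                        */
    static const unsigned char *decode_msg_prefix(const unsigned char *p,
                                                  struct msg_prefix *out)
    {
        out->type  = (uint16_t)(p[0] | (p[1] << 8));
        out->size  = (uint16_t)(p[2] | (p[3] << 8));
        out->flags = p[4];
        /* p[5..7] are reserved (zero) */
        return p + 8;
    }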

The header message types and the message data associated with - them compose the critical "metadata" about each object. Some - header messages are required for each object while others are - optional. Some optional header messages may also be repeated - several times in the header itself, the requirements and number - of times allowed in the header will be noted in each header - message description below. -

- -

The following is a list of currently defined header messages: -

- -
-

Name: NIL

- -

Header Message Type: 0x0000 -

-

Length: varies -

-

Status: Optional, may be repeated. -

-

Purpose and Description: The NIL message is used to indicate a - message which is to be ignored when reading the header messages for a - data object. [Possibly one which has been deleted for some reason.] -

-

Format of Data: Unspecified. -

- -
-

Name: Simple Dataspace

- -

Header Message Type: 0x0001 -

-

Length: Varies according to the number of dimensions, - as described in the following table. -

-

Status: Required for dataset objects, may not be - repeated. -

-

Description: The simple dataspace message describes the - number of dimensions (i.e. "rank") and size of each dimension that the - data object has. This message is only used for datasets which have a - simple, rectilinear grid layout; datasets requiring a more complex - layout (irregularly structured or unstructured grids, etc.) must use - the Complex Dataspace message for expressing the space the - dataset inhabits. (Note: The Complex Dataspace - functionality is not yet implemented and it is not described in this - document.) -

- -

Format of Data: -
-

- Simple Dataspace Message -
bytebytebytebyte
VersionDimensionalityFlagsReserved
Reserved
Dimension #1 SizeL
.
.
.
Dimension #n SizeL
Dimension #1 Maximum SizeL
.
.
.
Dimension #n Maximum SizeL
Permutation Index #1L
.
.
.
Permutation Index #nL
- - - -
(Items marked with an 'L' in the above table are of the size specified in "Size of Lengths.")
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Version -

This value is used to determine the format of the - Simple Dataspace Message. When the format of the - information in the message is changed, the version number - is incremented and can be used to determine how the - information in the object header is formatted. This - document describes version one (1) (there was no version - zero (0)). -

-
Dimensionality -

This value is the number of dimensions that the data - object has. -

-
Flags -

This field is used to store flags to indicate the - presence of parts of this message. Bit 0 (the least - significant bit) is used to indicate that maximum - dimensions are present. Bit 1 is used to indicate that - permutation indices are present. -

-
Dimension #n Size -

This value is the current size of the dimension of the - data as stored in the file. The first dimension stored in - the list of dimensions is the slowest changing dimension - and the last dimension stored is the fastest changing - dimension. -

-
Dimension #n Maximum Size -

This value is the maximum size of the dimension of the - data as stored in the file. This value may be the special - "unlimited" size which indicates - that the data may expand along this dimension indefinitely. - If these values are not stored, the maximum size of each - dimension is assumed to be the dimension's current size. -

-
Permutation Index #n -

This value is the index permutation used to map - each dimension from the canonical representation to an - alternate axis for each dimension. If these values are - not stored, the first dimension stored in the list of - dimensions is the slowest changing dimension and the last - dimension stored is the fastest changing dimension. -

-
-
- -
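For illustration, the number of elements described by a simple dataspace is simply the product of its current dimension sizes. A minimal C sketch (not library code; the function name is made up):

    #include <stdint.h>

    /* Total number of elements in a simple dataspace: the product
       of the current sizes of all 'rank' dimensions.              */
    uint64_t dataspace_nelements(const uint64_t *dims, unsigned rank)
    {
        uint64_t n = 1;
        unsigned i;

        for (i = 0; i < rank; i++)
            n *= dims[i];
        return n;
    }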

- - - -
-

Name: Reserved - Not Assigned Yet

- Header Message Type: 0x0002
- Length: N/A
- Status: N/A
- Format of Data: N/A
- -

Purpose and Description: This message type was skipped during - the initial specification of the file format and may be used in a - future expansion to the format. - - -


-

Name: Datatype

- -

Header Message Type: 0x0003 -

-

Length: variable -

-

Status: Required for dataset or named datatype objects, - may not be repeated. -

- -

Description: The datatype message defines the datatype for each element of a dataset. A datatype can describe an atomic type like a fixed- or floating-point type or a compound type like a C struct. Datatype messages are stored as a list of datatype classes and their associated properties.

- -

Datatype messages that are part of a dataset object do not describe how elements are related to one another; the dataspace message is used for that purpose. Datatype messages that are part of a named datatype message describe an "abstract" datatype that can be used by other objects in the file.

- -

Format of Data: -
-

- - - - - - - - - - - - - - - - - - - - - - - - -
- Datatype Message -
bytebytebytebyte
Class and VersionClass Bit Field, Bits 0-7Class Bit Field, Bits 8-15Class Bit Field, Bits 16-23
Size


Properties


-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Class and Version -

The version of the datatype message and the datatype's class - information are packed together in this field. The version - number is packed in the top 4 bits of the field and the class - is contained in the bottom 4 bits. -

-

The version number information is used for changes in the - format of the datatype message and is described here: - - - - - - - - - - - - - - - - - - -
VersionDescription
0Never used -
1Used by early versions of the library to encode - compound datatypes with explicit array fields. - See the compound datatype description below for - further details. -
2The current version used by the library. -
-

-

The class of the datatype determines the format for the class - bit field and properties portion of the datatype message, which - are described below. The - following classes are currently defined: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ValueDescription
0Fixed-Point
1Floating-Point
2Time
3String
4Bitfield
5Opaque
6Compound
7Reference
8Enumerated
9Variable-Length
10Array
-

-
Class Bit Fields -

The information in these bit fields is specific to each datatype - class and is described below. All bits not defined for a - datatype class are set to zero. -

-
Size -

The size of the datatype in bytes. -

-
Properties -

This variable-sized field encodes information specific to each - datatype class and is described below. If there is no - property information specified for a datatype class, the size - of this field is zero. -

-
-
-
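To make the packing of the first byte concrete, here is a small C sketch (not library code; the function names are made up) that splits the Class and Version byte into its two four-bit fields:

    /* The datatype message packs the version in the upper four bits
       of the first byte and the class in the lower four bits.       */
    unsigned dtype_version(unsigned char class_and_version)
    {
        return (class_and_version >> 4) & 0x0Fu;
    }

    unsigned dtype_class(unsigned char class_and_version)
    {
        return class_and_version & 0x0Fu;
    }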

- -

Class specific information for Fixed-Point Numbers (Class 0): - -
-

- - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0Byte Order. If zero, byte order is little-endian; - otherwise, byte order is big endian.
1, 2Padding type. Bit 1 is the lo_pad type and bit 2 - is the hi_pad type. If a datum has unused bits at either - end, then the lo_pad or hi_pad bit is copied to those - locations.
3Signed. If this bit is set then the fixed-point - number is in 2's complement form.
4-23Reserved (zero).
-
- -
-
- - - - - - - - - - - - - - -
- Property Descriptions -
ByteByteByteByte
Bit OffsetBit Precision
-
- -
-
- - - - - - - - - - - - - - - - -
Field NameDescription
Bit Offset -

The bit offset of the first significant bit of the fixed-point - value within the datatype. The bit offset specifies the number - of bits "to the right of" the value. -

-
Bit Precision -

The number of bits of precision of the fixed-point value - within the datatype. -

-
-
-
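As an illustration of the class 0 bit field above, this C sketch (not library code; the struct and function names are made up) extracts the individual flags, assuming the 24-bit class bit field has already been assembled into an integer with bit 0 as the least significant bit:

    /* Flags carried in the fixed-point (class 0) class bit field. */
    struct fixed_point_flags {
        int big_endian;   /* bit 0: 0 = little-endian, 1 = big-endian */
        int lo_pad;       /* bit 1: low padding bit                   */
        int hi_pad;       /* bit 2: high padding bit                  */
        int is_signed;    /* bit 3: two's complement when set         */
    };

    struct fixed_point_flags decode_fixed_point_flags(unsigned long bits)
    {
        struct fixed_point_flags f;

        f.big_endian = (int)( bits       & 1UL);
        f.lo_pad     = (int)((bits >> 1) & 1UL);
        f.hi_pad     = (int)((bits >> 2) & 1UL);
        f.is_signed  = (int)((bits >> 3) & 1UL);
        return f;
    }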

- -

Class specific information for Floating-Point Numbers (Class 1): - -
-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0Byte Order. If zero, byte order is little-endian; - otherwise, byte order is big endian.
1, 2, 3Padding type. Bit 1 is the low bits pad type, bit 2 - is the high bits pad type, and bit 3 is the internal bits - pad type. If a datum has unused bits at either end or between - the sign bit, exponent, or mantissa, then the value of bit - 1, 2, or 3 is copied to those locations.
4-5Normalization. The value can be 0 if there is no normalization, 1 if the most significant bit of the mantissa is always set (except for 0.0), and 2 if the most significant bit of the mantissa is not stored but is implied to be set. The value 3 is reserved and will not appear in this field.
6-7Reserved (zero).
8-15Sign Location. This is the bit position of the sign - bit. Bits are numbered with the least significant bit zero.
16-23Reserved (zero).
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - -
- Property Descriptions -
ByteByteByteByte
Bit OffsetBit Precision
Exponent LocationExponent SizeMantissa LocationMantissa Size
Exponent Bias
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Bit Offset -

The bit offset of the first significant bit of the floating-point - value within the datatype. The bit offset specifies the number - of bits "to the right of" the value. -

-
Bit Precision -

The number of bits of precision of the floating-point value - within the datatype. -

-
Exponent Location -

The bit position of the exponent field. Bits are numbered with - the least significant bit number zero. -

-
Exponent Size -

The size of the exponent field in bits. -

-
Mantissa Location -

The bit position of the mantissa field. Bits are numbered with - the least significant bit number zero. -

-
Mantissa Size -

The size of the mantissa field in bits. -

-
Exponent Bias -

The bias of the exponent field. -

-
-
-

- -

Class specific information for Time (Class 2): - -
-

- - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0Byte Order. If zero, byte order is little-endian; - otherwise, byte order is big endian.
1-23Reserved (zero).
-
- -
-
- - - - - - - - - - - -
- Property Descriptions -
ByteByte
Bit Precision
-
- -
-
- - - - - - - - - - - -
Field NameDescription
Bit Precision -

The number of bits of precision of the time value. -

-
-
-

- -

Class specific information for Strings (Class 3): - -
-

- - - - - - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0-3Padding type. This four-bit value determines the - type of padding to use for the string. The values are: - - - - - - - - - - - - - - - - - - - - - - - - - - -
ValueDescription
0Null Terminate: A zero byte marks the end of the - string and is guaranteed to be present after - converting a long string to a short string. When - converting a short string to a long string the value is - padded with additional null characters as necessary. -
1Null Pad: Null characters are added to the end of - the value during conversions from short values to long - values but conversion in the opposite direction simply - truncates the value. -
2Space Pad: Space characters are added to the end of - the value during conversions from short values to long - values but conversion in the opposite direction simply - truncates the value. This is the Fortran - representation of the string. -
3-15Reserved -
-
4-7Character Set. The character set to use for - encoding the string. The only character set supported is - the 8-bit ASCII (zero) so no translations have been defined - yet.
8-23Reserved (zero).
-
- -

There are no properties defined for the string class. -

-

- -

Class specific information for Bitfields (Class 4): - -
-

- - - - - - - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0Byte Order. If zero, byte order is little-endian; - otherwise, byte order is big endian.
1, 2Padding type. Bit 1 is the lo_pad type and bit 2 - is the hi_pad type. If a datum has unused bits at either - end, then the lo_pad or hi_pad bit is copied to those - locations.
3-23Reserved (zero).
-
- -
-
- - - - - - - - - - - - - - -
- Property Description -
ByteByteByteByte
Bit OffsetBit Precision
-
- -
-
- - - - - - - - - - - - - - - -
Field NameDescription
Bit Offset -

The bit offset of the first significant bit of the bitfield - within the datatype. The bit offset specifies the number - of bits "to the right of" the value. -

-
Bit Precision -

The number of bits of precision of the bitfield - within the datatype. -

-
-
-

- -

Class specific information for Opaque (Class 5): - -
-

- - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0-7Length of ASCII tag in bytes.
8-23Reserved (zero).
-
- -
-
- - - - - - - - - - - - - -
- Property Description -
ByteByteByteByte

ASCII Tag
-
-
- -
-
- - - - - - - - - - -
Field NameDescription
ASCII Tag -

This NUL-terminated string provides a description for the - opaque type. It is NUL-padded to a multiple of 8 bytes. -

-
-
-

- -

Class specific information for Compound (Class 6): - -
-

- - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0-15Number of Members. This field contains the number - of members defined for the compound datatype. The member - definitions are listed in the Properties field of the data - type message. -
16-23Reserved (zero).
-
-

- -

The Properties field of a compound datatype is a list of the - member definitions of the compound datatype. The member - definitions appear one after another with no intervening bytes. - The member types are described with a recursive datatype - message. - -

Note that the property descriptions are different for different - versions of the datatype version. Additionally note that the version - 0 properties are deprecated and have been replaced with the version - 1 properties in versions of the HDF5 library from the 1.4 release - onward. - -
-

- Properties Description for Datatype Version 1 -
ByteByteByteByte

Name

Byte Offset of Member
DimensionalityReserved (zero)
Dimension Permutation
Reserved (zero)
Dimension #1 Size (required)
Dimension #2 Size (required)
Dimension #3 Size (required)
Dimension #4 Size (required)

Member Type Message

-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Name -

This NUL-terminated string provides the name of the compound datatype member. It is NUL-padded to a multiple of 8 bytes.

-
Byte Offset of Member -

This is the byte offset of the member within the datatype. -

-
Dimensionality -

If set to zero, this field indicates a scalar member. If set - to a value greater than zero, this field indicates that the - member is an array of values. For array members, the size of - the array is indicated by the 'Size of Dimension n' field in - this message. -

-
Dimension Permutation -

This field was intended to allow an array field to have its dimensions permuted, but this was never implemented. This field should always be set to zero.

-
Dimension #n Size -

This field is the size of a dimension of the array field as - stored in the file. The first dimension stored in the list of - dimensions is the slowest changing dimension and the last - dimension stored is the fastest changing dimension. -

-
Member Type Message -

This field is a datatype message describing the datatype of - the member. -

-
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - -
- Properties Description for Datatype Version 2 -
ByteByteByteByte

Name

Byte Offset of Member

Member Type Message

-
- -
-
- - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Name -

This NUL-terminated string provides the name of the compound datatype member. It is NUL-padded to a multiple of 8 bytes.

-
Byte Offset of Member -

This is the byte offset of the member within the datatype. -

-
Member Type Message -

This field is a datatype message describing the datatype of - the member. -

-
-
-
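The member names above, like several other names and tags in this format, are NUL-terminated and then NUL-padded to a multiple of eight bytes. A small C sketch (not library code; the function name is made up) of the space such a string occupies:

    #include <stddef.h>
    #include <string.h>

    /* Space occupied by a NUL-terminated string that is NUL-padded
       to a multiple of eight bytes.                                 */
    size_t padded_name_size(const char *name)
    {
        size_t len = strlen(name) + 1;      /* include the NUL  */

        return (len + 7) & ~(size_t)7;      /* round up to 8    */
    }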

- -

Class specific information for Reference (Class 7): - -
-

- - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0-3Type. This four-bit value contains the type of reference - described. The values defined are: - - - - - - - - - - - - - - - - - - - - - - - - - - -
ValueDescription
0Object Reference: A reference to another object in this - HDF5 file. -
1Dataset Region Reference: A reference to a region within - a dataset in this HDF5 file. -
2Internal Reference: A reference to a region within the - current dataset. (Not currently implemented) -
3-15Reserved -
- -
4-23Reserved (zero).
-
- -

There are no properties defined for the reference class. -

-

- -

Class specific information for Enumeration (Class 8): - -
-

- - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0-15Number of Members. The number of name/value - pairs defined for the enumeration type.
16-23Reserved (zero).
-
- -
-
- - - - - - - - - - - - - - - - - - - - - - -
- Property Description -
ByteByteByteByte

Base Type


Names


Values

-
- -
-
- - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Base Type -

Each enumeration type is based on some parent type, usually an - integer. The information for that parent type is described - recursively by this field. -

-
Names -

The name for each name/value pair. Each name is stored as a null - terminated ASCII string in a multiple of eight bytes. The names - are in no particular order. -

-
Values -

The list of values in the same order as the names. The values - are packed (no inter-value padding) and the size of each value - is determined by the parent type. -

-
-
-

- - -

Class specific information for Variable-Length (Class 9): - -
-

- - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Bit Field Description -
BitsMeaning
0-3Type. This four-bit value contains the type of - variable-length datatype described. The values defined are: - - - - - - - - - - - - - - - - - - - - - -
ValueDescription
0Sequence: A variable-length sequence of any datatype. Variable-length sequences do not have padding or character set information.
1String: A variable-length sequence of characters. - Variable-length strings have padding and character set - information. -
2-15Reserved -
- -
4-7Padding type. (variable-length string only) - This four-bit value determines the type of padding - used for variable-length strings. The values are the same - as for the string padding type, as follows: - - - - - - - - - - - - - - - - - - - - - - - - - -
ValueDescription
0Null terminate: A zero byte marks the end of a string - and is guaranteed to be present after converting a long - string to a short string. When converting a short string - to a long string, the value is padded with additional null - characters as necessary. -
1Null pad: Null characters are added to the end of the - value during conversion from a short string to a longer - string. Conversion from a long string to a shorter string - simply truncates the value. -
2Space pad: Space characters are added to the end of the - value during conversion from a short string to a longer - string. Conversion from a long string to a shorter string - simply truncates the value. This is the Fortran - representation of the string. -
3-15Reserved -
- - This value is set to zero for variable-length sequences. - -
8-11Character Set. (variable-length string only) - This four-bit value specifies the character set - to be used for encoding the string: - - - - - - - - - - - - - - - -
ValueDescription
0ASCII: As of this writing (July 2003, Release 1.6.0), - 8-bit ASCII is the only character set supported. Therefore, - no translations have been defined. -
1-15Reserved -
- - This value is set to zero for variable-length sequences. - -
12-23Reserved (zero).
-
- -
-
- - - - - - - - - - - - - - -
- Property Description -
ByteByteByteByte

Base Type

-
- -
-
- - - - - - - - - - - -
Field NameDescription
Base Type -

Each variable-length type is based on some parent type. The - information for that parent type is described recursively by - this field. -

-
-
-

- -

Class specific information for Array (Class 10): - -

There are no bit fields defined for the array class. -

- -

Note that the dimension information defined in the property for this - datatype class is independent of dataspace information for a dataset. - The dimension information here describes the dimensionality of the - information within a data element (or a component of an element, if the - array datatype is nested within another datatype) and the dataspace for a - dataset describes the location of the elements in a dataset. -

- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Property Description -
ByteByteByteByte
DimensionalityReserved (zero)
Dimension #1 Size
.
.
.
Dimension #n Size
Permutation Index #1
.
.
.
Permutation Index #n

Base Type

-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Dimensionality -

This value is the number of dimensions that the array has. -

-
Dimension #n Size -

This value is the size of the dimension of the array - as stored in the file. The first dimension stored in - the list of dimensions is the slowest changing dimension - and the last dimension stored is the fastest changing - dimension. -

-
Permutation Index #n -

This value is the index permutation used to map - each dimension from the canonical representation to an - alternate axis for each dimension. Currently, dimension - permutations are not supported and these indices should be set - to the index position minus one (i.e. the first dimension should - be set to 0, the second dimension should be set to 1, etc.) -

-
Base Type -

Each array type is based on some parent type. The - information for that parent type is described recursively by - this field. -

-
-
- -

- -
-

Name: Data Storage - Fill Value (Old)

- -

Header Message Type: 0x0004 -

-

Length: varies -

-

Status: Optional, may not be repeated. -

- -

Description: The fill value message stores a single - data value which is returned to the application when an uninitialized - data element is read from a dataset. The fill value is interpreted - with the same datatype as the dataset. If no fill value message is - present then a fill value of all zero bytes is assumed. -

- -

This fill value message is deprecated in favor of the "new" fill value message (Message Type 0x0005) and is only written to the file for backward compatibility with versions of the HDF5 library before the 1.6.0 version. Additionally, it only appears for datasets with a user-defined fill value (as opposed to the library default fill value or an explicitly set "undefined" fill value).

- -

Format of Data: -
-

- - - - - - - - - - - - - - - - - -
- Fill Value Message (Old) -
bytebytebytebyte
Size

Fill Value

-
- -
-
- - - - - - - - - - - - - - - -
Field NameDescription
Size -

This is the size of the Fill Value field in bytes. -

-
Fill Value -

The fill value. The bytes of the fill value are interpreted - using the same datatype as for the dataset. -

-
-
-

- -
-

Name: Data Storage - Fill Value

- -

Header Message Type: 0x0005 -

-

Length: varies -

-

Status: Required for dataset objects, may not be repeated. -

- -

Description: The fill value message stores a single - data value which is returned to the application when an uninitialized - data element is read from a dataset. The fill value is interpreted - with the same datatype as the dataset. -

- -

Format of Data: -
-

- - - - - - - - - - - - - - - - - - - - - - - - -
- Fill Value Message -
bytebytebytebyte
VersionSpace Allocation TimeFill Value Write TimeFill Value Defined
Size

Fill Value

-
- -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Version -

The version number information is used for changes in the - format of the fill value message and is described here: - - - - - - - - - - - - - - - - - - -
VersionDescription
0Never used -
1Used by version 1.6.x of the library to encode - fill values. In this version, the Size field is - always present. -
2The current version used by the library (version - 1.7.3 or later). In this version, the Size and - Fill Value fields are - only present if the Fill Value Defined field is set - to 1. -
-

-
Space Allocation Time -

When the storage space for the dataset's raw data will be - allocated. The allowed values are: - - - - - - - - - - - - - - - - - - -
ValueDescription
1Early allocation. Storage space for the entire dataset - should be allocated in the file when the dataset is - created. -
2Late allocation. Storage space for the entire dataset - should not be allocated until the dataset is written - to. -
3Incremental allocation. Storage space for the - dataset should not be allocated until the portion - of the dataset is written to. This is currently - used in conjunction with chunked data storage for - datasets. -
-

-
Fill Value Write Time -

At the time that storage space for the dataset's raw data is - allocated, this value indicates whether the fill value should - be written to the raw data storage elements. The allowed values - are: - - - - - - - - - - - - - - - - - - -
ValueDescription
0On allocation. The fill value is always written to - the raw data storage when the storage space is allocated. -
1Never. The fill value should never be written to - the raw data storage. -
2Fill value written if set by user. The fill value - will be written to the raw data storage when the storage - space is allocated only if the user explicitly set - the fill value. If the fill value is the library - default or is undefined, it will not be written to - the raw data storage. -
-

-
Fill Value Defined -

This value indicates if a fill value is defined for this - dataset. If this value is 0, the fill value is undefined. - If this value is 1, a fill value is defined for this dataset. - For version 2 or later of the fill value message, this value - controls the presence of the Size field. -

-
Size -

This is the size of the Fill Value field in bytes. This field - is not present if the Version field is >1 and the Fill Value - Defined field is set to 0. -

-
Fill Value -

The fill value. The bytes of the fill value are interpreted - using the same datatype as for the dataset. This field is - not present if the Version field is >1 and the Fill Value - Defined field is set to 0. -

-
-
-
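The following is a minimal C sketch of how the fields above might be mirrored in memory when decoding a version 2 fill value message; the struct and member names are hypothetical, not library definitions.

    #include <stdint.h>

    struct fill_value_msg_v2 {            /* hypothetical names */
        uint8_t        version;           /* 2 for the current format                    */
        uint8_t        space_alloc_time;  /* 1 = early, 2 = late, 3 = incremental        */
        uint8_t        fill_write_time;   /* 0 = on alloc, 1 = never, 2 = if set by user */
        uint8_t        fill_defined;      /* 0 = undefined, 1 = defined                  */
        uint32_t       size;              /* present only when fill_defined is 1         */
        const uint8_t *fill_value;        /* 'size' bytes, same datatype as the dataset  */
    };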

- - - -
-

Name: Reserved - Not Assigned Yet

- Header Message Type: 0x0006
- Length: N/A
- Status: N/A
- Format of Data: N/A
- -

Purpose and Description: This message type was skipped during - the initial specification of the file format and may be used in a - future expansion to the format. - -


-

Name: Data Storage - - External Data Files

- Header Message Type: 0x0007
- Length: varies
- Status: Optional, may not be repeated.
- -

Purpose and Description: The external object message - indicates that the data for an object is stored outside the HDF5 - file. The filename of the object is stored as a Universal - Resource Location (URL) of the actual filename containing the - data. An external file list record also contains the byte offset - of the start of the data within the file and the amount of space - reserved in the file for that data. - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - -
- External File List Message -
bytebytebytebyte
VersionReserved
Allocated SlotsUsed Slots

Heap Address


Slot Definitions...

-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Version This value is used to determine the format of the - External File List Message. When the format of the - information in the message is changed, the version number - is incremented and can be used to determine how the - information in the object header is formatted.
ReservedThis field is reserved for future use.
Allocated SlotsThe total number of slots allocated in the message. Its - value must be at least as large as the value contained in - the Used Slots field.
Used SlotsThe number of initial slots which contain valid - information. The remaining slots are zero filled.
Heap AddressThis is the address of a local name heap which contains - the names for the external files. The name at offset zero - in the heap is always the empty string.
Slot DefinitionsThe slot definitions are stored in order according to - the array addresses they represent. If more slots have - been allocated than what has been used then the defined - slots are all at the beginning of the list.
-
- -

-

- - - - - - - - - - - - - - - - - - - - - -
- External File List Slot -
bytebytebytebyte

Name Offset (<size> bytes)


File Offset (<size> bytes)


Size

-
- -

-

- - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Name Offset (<size> bytes)The byte offset within the local name heap for the name - of the file. File names are stored as a URL which has a - protocol name, a host name, a port number, and a file - name: - protocol:port//host/file. - If the protocol is omitted then "file:" is assumed. If - the port number is omitted then a default port for that - protocol is used. If both the protocol and the port - number are omitted then the colon can also be omitted. If - the double slash and host name are omitted then - "localhost" is assumed. The file name is the only - mandatory part, and if the leading slash is missing then - it is relative to the application's current working - directory (the use of relative names is not - recommended).
File Offset (<size> bytes)This is the byte offset to the start of the data in the - specified file. For files that contain data for a single - dataset this will usually be zero.
SizeThis is the total number of bytes reserved in the - specified file for raw data storage. For a file that - contains exactly one complete dataset which is not - extendable, the size will usually be the exact size of the - dataset. However, by making the size larger one allows - HDF5 to extend the dataset. The size can be set to a value - larger than the entire file since HDF5 will read zeros - past the end of the file without failing.
-
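In the C API an external file list message such as this is normally produced by adding external files to a dataset creation property list before the dataset is created. A minimal sketch, assuming a raw data file named raw.dat and room for 100 ints; the property list would then be passed to H5Dcreate.

    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    /* reserve 100 * sizeof(int) bytes starting at offset 0 of raw.dat */
    H5Pset_external(dcpl, "raw.dat", (off_t)0, (hsize_t)(100 * sizeof(int)));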
- - -
-

Name: Data Storage - Layout

- - Header Message Type: 0x0008
- Length: varies
- Status: Required for datasets, may not be repeated. - -

Purpose and Description: Data layout describes how the - elements of a multi-dimensional array are arranged in the linear - address space of the file. Three types of data layout are - supported: - -

    -
  1. The array can be stored in one contiguous area of the file. - The layout requires that the size of the array be constant and - does not permit chunking, compression, checksums, encryption, - etc. The message stores the total size of the array and the - offset of an element from the beginning of the storage area is - computed as in C. - -
  2. The array domain can be regularly decomposed into chunks and - each chunk is allocated separately. This layout supports - arbitrary element traversals, compression, encryption, and - checksums, and the chunks can be distributed across external - raw data files (these features are described in other - messages). The message stores the size of a chunk instead of - the size of the entire array; the size of the entire array can - be calculated by traversing the B-tree that stores the chunk - addresses. - -
  3. The array can be stored in one contiguous block, as part of - this object header message (this is called "compact" storage below). -
- -

Version 3 of this message re-structured the format into specific - properties that are required for each layout class. - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Data Layout Message, Versions 1 and 2 -
bytebytebytebyte
VersionDimensionalityLayout ClassReserved
Reserved

Address

Dimension 0 (4-bytes)
Dimension 1 (4-bytes)
...
Compact Data Size (4-bytes)
Compact Data
...
-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
VersionA version number for the layout message. This value can be - either 1 or 2.
DimensionalityAn array has a fixed dimensionality. This field - specifies the number of dimension size fields later in the - message.
Layout ClassThe layout class specifies how the other fields of the - layout message are to be interpreted. A value of one - indicates contiguous storage, a value of two - indicates chunked storage, - while a value of zero - indicates compact storage. Other values will be defined - in the future.
AddressFor contiguous storage, this is the address of the first - byte of storage. For chunked storage this is the address - of the B-tree that is used to look up the addresses of the - chunks. This field is not present for compact storage. - If the version for this message is set to 2, the address - may have the "undefined address" value, to indicate that - storage has not yet been allocated for this array.
DimensionsFor contiguous storage the dimensions define the entire - size of the array while for chunked storage they define - the size of a single chunk.
Compact Data SizeThis field is only present for compact data storage. - It contains the size of the raw data for the dataset array.
Compact DataThis field is only present for compact data storage. - It contains the raw data for the dataset array.
-
- -

-

- - - - - - - - - - - - - - - - - - - -
- Data Layout Message, Version 3 -
bytebytebytebyte
VersionLayout Class
Properties
-
- -

-

- - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
VersionA version number for the layout message. This value can be - either 1, 2 or 3.
Layout ClassThe layout class specifies how the other fields of the - layout message are to be interpreted. A value of one - indicates contiguous storage, a value of two - indicates chunked storage, - while a value of three - indicates compact storage.
PropertiesThis variable-sized field encodes information specific to each - layout class and is described below. If there is no property - information specified for a layout class, the size of this field - is zero bytes.
-
- -

Class-specific information for contiguous layout (Class 0): - -

-

- - - - - - - - - - - - - - - - - -
- Property Descriptions -
bytebytebytebyte

Address


Size

-
- -

-

- - - - - - - - - - - - - - -
Field NameDescription
AddressThis is the address of the first byte of raw data storage. - The address may have the "undefined address" value, to indicate - that storage has not yet been allocated for this array.
SizeThis field contains the size allocated to store the raw data.
-
- -

Class-specific information for chunked layout (Class 1): - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Property Descriptions -
bytebytebytebyte
Dimensionality

Address

Dimension 0 (4-bytes)
Dimension 1 (4-bytes)
...
-
- -

-

- - - - - - - - - - - - - - - - - - - - -
Field NameDescription
DimensionalityA chunk has a fixed dimensionality. This field - specifies the number of dimension size fields later in the - message.
AddressThis is the address - of the B-tree that is used to look up the addresses of the - chunks. - The address - may have the "undefined address" value, to indicate that - storage has not yet been allocated for this array.
DimensionsThe dimension sizes define the size of a single chunk.
-
- -

Class-specific information for compact layout (Class 2): - -

-

- - - - - - - - - - - - - - - - - - - - - -
- Property Descriptions -
bytebytebytebyte
Size
Raw Data
...
-
- -

-

- - - - - - - - - - - - - - -
Field NameDescription
SizeThis field contains the size of the raw data for the dataset array.
Raw DataThis field contains the raw data for the dataset array.
-
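A rough C sketch of the three version 3 layout variants described above; the struct and member names are hypothetical, and the fixed-size dimension array is an assumption made only for illustration.

    #include <stdint.h>

    struct layout_msg_v3 {                      /* hypothetical names */
        uint8_t version;                        /* 3                                      */
        uint8_t layout_class;                   /* contiguous, chunked, or compact        */
        union {
            struct { uint64_t addr;             /* address of first byte of raw data      */
                     uint64_t size; } contiguous;
            struct { uint8_t  ndims;            /* dimensionality of a chunk              */
                     uint64_t btree_addr;       /* B-tree used to look up chunk addresses */
                     uint32_t dim[32]; } chunked;   /* one 4-byte size per dimension      */
            struct { uint32_t size;             /* size of the raw data in bytes          */
                     const uint8_t *raw_data; } compact;
        } u;
    };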
- - -
-

Name: Reserved - Not Assigned Yet

- Header Message Type: 0x0009
- Length: N/A
- Status: N/A
- Format of Data: N/A
- -

Purpose and Description: This message type was skipped during - the initial specification of the file format and may be used in a - future expansion to the format. - -


-

Name: Reserved - Not Assigned Yet

- Header Message Type: 0x000A
- Length: N/A
- Status: N/A
- Format of Data: N/A
- -

Purpose and Description: This message type was skipped during - the initial specification of the file format and may be used in a - future expansion to the format. - -


-

Name: Data Storage - Filter Pipeline

- Header Message Type: 0x000B
- Length: varies
- Status: Optional, may not be repeated. - -

Purpose and Description: This message describes the filter pipeline which should be applied to the data stream by providing filter identification numbers, flags, a name, and client data. -

-

- - - - - - - - - - - - - - - - - - - - - - - -
- Filter Pipeline Message -
bytebytebytebyte
VersionNumber of FiltersReserved
Reserved

Filter List

-
- -

-

- - - - - - - - - - - - - - - - - - - - -
Field NameDescription
VersionThe version number for this message. This document - describes version one.
Number of FiltersThe total number of filters described by this - message. The maximum possible number of filters in a - message is 32.
Filter ListA description of each filter. A filter description - appears in the next table.
-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Filter Pipeline Message -
bytebytebytebyte
Filter IdentificationName Length
FlagsClient Data Number of Values

Name


Client Data

Padding
-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
Filter IdentificationThis is a unique (except in the case of testing) - identifier for the filter. Values from zero through 255 - are reserved for filters defined by the NCSA HDF5 - library. Values 256 through 511 have been set aside for - use when developing/testing new filters. The remaining - values are allocated to specific filters by contacting the - HDF5 Development - Team.
Name LengthEach filter has an optional null-terminated ASCII name - and this field holds the length of the name including the - null termination padded with nulls to be a multiple of - eight. If the filter has no name then a value of zero is - stored in this field.
FlagsThe flags indicate certain properties for a filter. The - bit values defined so far are: - -
-
bit 1 -
If set then the filter is an optional filter. - During output, if an optional filter fails it will be - silently removed from the pipeline. -
-
Client Data Number of ValuesEach filter can store a few integer values to control - how the filter operates. The number of entries in the - Client Data array is stored in this field.
NameIf the Name Length field is non-zero then it will - contain the size of this field, a multiple of eight. This - field contains a null-terminated, ASCII character - string to serve as a comment/name for the filter.
Client DataThis is an array of four-byte integers which will be - passed to the filter function. The Client Data Number of - Values determines the number of elements in the - array.
PaddingFour bytes of zeros are added to the message at this - point if the Client Data Number of Values field contains - an odd number.
-
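As an illustration of how a filter pipeline message comes to be written, the sketch below enables the deflate filter on a dataset creation property list; the chunk sizes and the dcpl name are assumptions.

    hsize_t chunk[2] = {100, 100};
    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk);   /* filters apply only to chunked layout         */
    H5Pset_deflate(dcpl, 6);        /* adds one filter description to the pipeline  */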
- -
-

Name: Attribute

- Header Message Type: 0x000C
- Length: varies
- Status: Optional, may be repeated.
- -

Purpose and Description: The Attribute - message is used to list objects in the HDF file which are used - as attributes, or "metadata" about the current object. An - attribute is a small dataset; it has a name, a datatype, a data - space, and raw data. Since attributes are stored in the object - header they must be relatively small (<64KB) and can be - associated with any type of object which has an object header - (groups, datasets, named types and spaces, etc.). - -

Note: Attributes on an object must have unique names. (The HDF5 library currently enforces this by causing the creation of an attribute with a duplicate name to fail.) Attributes on different objects may have the same name, however. -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Attribute Message -
bytebytebytebyte
VersionReservedName Size
Type SizeSpace Size

Name


Type


Space


Data

-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
VersionVersion number for the message. This document describes - version 1 of attribute messages.
ReservedThis field is reserved for later use and is set to - zero.
Name SizeThe length of the attribute name in bytes including the - null terminator. Note that the Name field below may - contain additional padding not represented by this - field.
Type SizeThe length of the datatype description in the Type - field below. Note that the Type field may contain - additional padding not represented by this field.
Space SizeThe length of the dataspace description in the Space - field below. Note that the Space field may contain - additional padding not represented by this field.
NameThe null-terminated attribute name. This field is - padded with additional null characters to make it a - multiple of eight bytes.
TypeThe datatype description follows the same format as - described for the datatype object header message. This - field is padded with additional zero bytes to make it a - multiple of eight bytes.
SpaceThe dataspace description follows the same format as - described for the dataspace object header message. This - field is padded with additional zero bytes to make it a - multiple of eight bytes.
DataThe raw data for the attribute. The size is determined - from the datatype and dataspace descriptions. This - field is not padded with additional zero - bytes.
-
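Each call to the attribute API adds one attribute message of the form above to the object header. A minimal sketch, where obj_id stands for any open group, dataset, or named datatype, and the attribute name and value are assumptions:

    hsize_t dims  = 1;
    int     val   = 42;
    hid_t   space = H5Screate_simple(1, &dims, NULL);
    hid_t   attr  = H5Acreate(obj_id, "counter", H5T_NATIVE_INT, space, H5P_DEFAULT);
    H5Awrite(attr, H5T_NATIVE_INT, &val);
    H5Aclose(attr);
    H5Sclose(space);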
- -
-

Name: Object Comment

- -

Header Message Type: 0x000D
- Length: varies
- Status: Optional, may not be repeated. - -

Purpose and Description: The object comment is designed to be a short description of an object. An object comment is a sequence of non-null (that is, not '\0') ASCII characters with no other formatting included by the library. -

-

- - - - - - - - - - - - - -
- Name Message -
bytebytebytebyte

Comment

-
- -

-

- - - - - - - - - - -
Field NameDescription
Comment: A null-terminated ASCII character string.
-
- -
-

Name: Object Modification Date & Time (Old)

- -

Header Message Type: 0x000E
- Length: fixed
- Status: Optional, may not be repeated. - -

Purpose and Description: The object modification date - and time is a timestamp which indicates (using ISO-8601 date and - time format) the last modification of an object. The time is - updated when any object header message changes according to the - system clock where the change was posted. - -

This modification time message is deprecated in favor of the "new" - modification time message (Message Type 0x0012) and is no longer written - to the file in versions of the HDF5 library after the 1.6.0 version. -

- - -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Modification Time Message -
bytebytebytebyte
Year
MonthDay of Month
HourMinute
SecondReserved
-
- -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Field NameDescription
YearThe four-digit year as an ASCII string. For example, - 1998. All fields of this message should be interpreted - as coordinated universal time (UTC)
MonthThe month number as a two digit ASCII string where - January is 01 and December is 12.
Day of MonthThe day number within the month as a two digit ASCII - string. The first day of the month is 01.
HourThe hour of the day as a two digit ASCII string where - midnight is 00 and 11:00pm is 23.
MinuteThe minute of the hour as a two digit ASCII string where - the first minute of the hour is 00 and - the last is 59.
SecondThe second of the minute as a two digit ASCII string - where the first second of the minute is 00 - and the last is 59.
ReservedThis field is reserved and should always be zero.
-
- -
-

Name: Shared Object Message

- Header Message Type: 0x000F
- Length: 4 Bytes
- Status: Optional, may be repeated. - -

A constant message can be shared among several object headers - by writing that message in the global heap and having the object - headers all point to it. The pointing is accomplished with a - Shared Object message which is understood directly by the object - header layer of the library. It is also possible to have a - message of one object header point to a message in some other - object header, but care must be exercised to prevent cycles. - -

If a message is shared, then the message appears in the global - heap and its message ID appears in the Header Message Type - field of the object header. Also, the Flags field in the object - header for that message will have bit two set (the - H5O_FLAG_SHARED bit). The message body in the - object header will be that of a Shared Object message defined - here and not that of the pointed-to message. - -

-

- - - - - - - - - - - - - - - - - - - -
- Shared Message Message -
byte - byte - byte - byte -
VersionFlagsReserved
Reserved

Pointer

-
- -

-

- - - - - - - - - - - - - - - - - - - -
Field NameDescription
VersionThe version number for the message. This document - describes version one of shared messages.
FlagsThe Shared Message message points to a message which is - shared among multiple object headers. The Flags field - describes the type of sharing: - -
-
Bit 0 -
If this bit is clear then the actual message is the - first message in some other object header; otherwise - the actual message is stored in the global heap. - -
Bits 2-7 -
Reserved (always zero) -
-
PointerThis field points to the actual message. The format of - the pointer depends on the value of the Flags field. If - the actual message is in the global heap then the pointer - is the file address of the global heap collection that - holds the message, and a four-byte index into that - collection. Otherwise the pointer is a group entry - that points to some other object header.
-
- - -
-

Name: Object Header Continuation

-Header Message Type: 0x0010
-Length: fixed
-Status: Optional, may be repeated.
-Purpose and Description: The object header continuation is the location -in the file of more header messages for the current data object. This can be -used when header blocks are large, or likely to change over time.
-Format of Data:

- The object header continuation is formatted as follows (assuming a 4-byte -length & offset are being used in the current file): - -

-

- - - - - - - - - - - - - -
-HDF5 Object Header Continuation Message Layout -
bytebytebytebyte
Header Continuation Offset
Header Continuation Length
-
- -

-

-
The elements of the Header Continuation Message are described below: -
-
-
Header Continuation Offset: (<offset> bytes) -
This value is the offset in bytes from the beginning of the file where the -header continuation information is located. -
Header Continuation Length: (<length> bytes) -
This value is the length in bytes of the header continuation information in -the file. -
-
- -
-

Name: Group Message

-Header Message Type: 0x0011
-Length: fixed
-Status: Required for groups, may not be repeated.
-Purpose and Description: Each group has a B-tree and a -name heap which are pointed to by this message.
-Format of data: -

The group message is formatted as follows: - -

-

- - - - - - - - - - - - - - -
-HDF5 Object Header Group Message Layout -
bytebytebytebyte
B-tree Address
Heap Address
-
- -

-

-
The elements of the Group Message are described below: -
-
-
B-tree Address (<offset> bytes) -
This value is the offset in bytes from the beginning of the file -where the B-tree is located. -
Heap Address (<offset> bytes) -
This value is the offset in bytes from the beginning of the file -where the group name heap is located. -
-
- -
-

Name: Object Modification Date & Time

- -

Header Message Type: 0x0012 -

-

Length: Fixed -

-

Status: Optional, may not be repeated. -

- -

Description: The object modification date - and time is a timestamp which indicates - the last modification of an object. The time is - updated when any object header message changes according to the - system clock where the change was posted. -

- -

-

- - - - - - - - - - - - - - - - - - -
- Modification Time Message -
bytebytebytebyte
VersionReserved
Seconds After Epoch
-
- -

-

- - - - - - - - - - - - - - - - - - - -
Field NameDescription
VersionThe version number for the message. This document - describes version one of the new modification time message.
ReservedThis field is reserved and should always be zero.
Seconds After EpochThe number of seconds since 0 hours, 0 - minutes, 0 seconds, January 1, 1970, Coordinated Universal Time. -
-
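The Seconds After Epoch field can be interpreted with the standard C time functions; a small sketch (the function name is hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    void print_mtime(uint32_t seconds_after_epoch)
    {
        time_t t = (time_t)seconds_after_epoch;
        printf("last modified: %s", asctime(gmtime(&t)));   /* printed as UTC */
    }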
- -

Disk Format: Level 2b - Shared Data Object Headers

-

In order to share header messages between several dataset objects, object -header messages may be placed into the global heap. Since these -messages require additional information beyond the basic object header message -information, the format of the shared message is detailed below. - -

-

- - - - - - - - - - - - - -
-HDF5 Shared Object Header Message -
bytebytebytebyte
Reference Count of Shared Header Message

Shared Object Header Message

-
- -

-

-
The elements of the shared object header message are described below: -
-
-
Reference Count of Shared Header Message: (32-bit unsigned integer) -
This value is used to keep a count of the number of dataset objects which -refer to this message from their dataset headers. When this count reaches zero, -the shared message header may be removed from the global heap. -
Shared Object Header Message: (various lengths) -
The data stored for the shared object header message is formatted in the -same way as the private object header messages described in the object header -description earlier in this document and begins with the header message Type. -
-
- - -

Disk Format: Level 2c - Data Object Data Storage

-

The data for an object is stored separately from the header -information in the file and may not actually be located in the HDF5 file -itself if the header indicates that the data is stored externally. The -information for each record in the object is stored according to the -dimensionality of the object (indicated in the dimensionality header message). -Multi-dimensional data is stored in C order [same as current scheme], i.e. the -"last" dimension changes fastest. -
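For a contiguously stored two-dimensional dataset this ordering means the byte offset of an element can be computed exactly as for a C array; a small sketch (the function name is hypothetical):

    #include <stddef.h>

    /* offset of element (i, j) in an NX x NY dataset stored in C (row-major) order */
    size_t element_offset(size_t i, size_t j, size_t NY, size_t elem_size)
    {
        return (i * NY + j) * elem_size;
    }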

Data whose elements are composed of simple number-types are stored in -native-endian IEEE format, unless they are specifically defined as being stored -in a different machine format with the architecture-type information from the -number-type header message. This means that each architecture will need to -[potentially] byte-swap data values into the internal representation for that -particular machine. -

Data with a variable-length datatype is stored in the global heap -of the HDF5 file. Global heap identifiers are stored in the -data object storage. -

Data whose elements are composed of pointer number-types are stored in several -different ways depending on the particular pointer type involved. Simple -pointers are just stored as the dataset offset of the object being pointed to with the -size of the pointer being the same number of bytes as offsets in the file. -Dataset region references are stored as a heap-ID which points to the following -information within the file-heap: an offset of the object pointed to, number-type -information (same format as header message), dimensionality information (same -format as header message), sub-set start and end information (i.e. a coordinate -location for each), and field start and end names (i.e. a [pointer to the] -string indicating the first field included and a [pointer to the] string name -for the last field). - -

Data of a compound datatype is stored as a contiguous stream of the items -in the structure, with each item formatted according to its datatype.

- -

Appendix

-

Definitions of various terms used in this document. -

-

The "undefined address" for a file is a -file address with all bits set, i.e. 0xffff...ff. -

The "unlimited size" for a size is a -value with all bits set, i.e. 0xffff...ff. - -


-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- -
- HDF5 User's Guide 
- HDF5 Reference Manual 
- HDF5 Application Developer's Guide 
-
-
-
- - - -
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
-Last modified: 12 July 2004 - - - - diff --git a/doc/html/H5.intro.html b/doc/html/H5.intro.html deleted file mode 100644 index 8984153..0000000 --- a/doc/html/H5.intro.html +++ /dev/null @@ -1,3161 +0,0 @@ - - -Introduction to HDF5 - - - - - - - - - -
-
- - - -
-Introduction to HDF5 
-HDF5 User Guide  - -
-HDF5 Reference Manual 
-Other HDF5 documents and links  -
-
-
- -

Introduction to HDF5

- -

This is an introduction to the HDF5 data model and programming model. Being a Getting Started or QuickStart document, this Introduction to HDF5 is intended to provide enough information for you to develop a basic understanding of how HDF5 works and is meant to be used. Knowledge of the current version of HDF will make it easier to follow the text, but it is not required. More complete information of the sort you will need to actually use HDF5 is available in the HDF5 documentation. Available documents include the following: - -

- -

Code examples are available in the source code tree when you install HDF5. - -

    -
  • The directories hdf5/examples, -hdf5/doc/html/examples/, and -hdf5/doc/html/Tutor/examples/ contain the examples -used in this document. -
  • The directory hdf5/test contains the development tests used by the HDF5 developers. Since these codes are intended to fully exercise the system, they provide more diverse and sophisticated examples of what HDF5 can do.
- - -
-
- - - -
Table of Contents
- - Introduction to HDF5

- 1. What Is HDF5?
- -     Why HDF5?
- -     Changes in the - Current Release

- - 2. HDF5 File Organization and
-     Data Model
- -     HDF5 Groups
-     HDF5 Datasets
-     HDF5 Attributes
-     The File as Written to Media

- - 3. The HDF5 API
- -     Naming - Conventions
-     Include Files
-     Programming - Models
-          - Creating an HDF5 file
-          - Discarding objects
-          - Writing a dataset to a - new file
-          - Getting information about - a dataset
-          - Reading/writing a portion of - a dataset
-          - Selecting hyperslabs
-          - Selecting of independent - points
-          - Creating - variable-length datatypes
-          - Creating array datatypes
-          - Creating compound - datatypes - -

   - - 3. The HDF5 API (continued)
- -     Programming - Models (continued)
-          - Creating/writing extendible - and
-          -          - chunked datasets
-          - Working with groups
-          - Working with attributes
-          - Working with references to - objects
-          - Working with references to - dataset
-          -          - regions

- - Introduction to HDF5 -- Example Codes
- -          - 1: Creating and writing a dataset
-          - 2. Reading a hyperslab
-          - 3. Writing selected data
-          - 4. Working with variable-length
-          -          - datatypes
-          - 5. Working with array datatypes
-          - 6. Working with compound datatypes
-          - 7. Creating and writing an - extendible
-          -          - dataset
-          - 8. Reading data
-          - 9. Creating groups
-          - 10. Writing and reading - attributes
-          - 11. Creating and writing references
-          -          - to objects
-          - 12. Reading references to objects
-          - 13. Creating and writing references
-          -          - to dataset regions
-          - 14. Reading references to dataset
-          -          - regions -
-

-
-

- -


-

1. What Is HDF5?

-

HDF5 is a completely new Hierarchical Data Format -product consisting of a data format specification and a -supporting library implementation. HDF5 is designed to address some -of the limitations of the older HDF product and to address current and -anticipated requirements of modern systems and applications. -1 -

We urge you to look at HDF5, the format and the library, and give us -feedback on what you like or do not like about it, and what features -you would like to see added to it. - -

Why HDF5? -The development of HDF5 is motivated by a number of limitations in the -older HDF format and library. Some of these limitations are: - -

    -
  • A single file cannot store more than 20,000 complex objects, and a single file cannot be larger than 2 gigabytes. -
  • The data models are less consistent than they should be, there are more object types than necessary, and datatypes are too restricted. -
  • The library source is old and overly complex, does not support parallel I/O effectively, and is difficult to use in threaded applications.
- -

HDF5 includes the following improvements. - -

    -
  • A new file format designed to address some of the deficiencies of HDF4.x, particularly the need to store larger files and more objects per file. -
  • A simpler, more comprehensive data model that includes only two basic structures: a multidimensional array of record structures, and a grouping structure. -
  • A simpler, better-engineered library and API, with improved support for parallel I/O, threads, and other requirements imposed by modern systems and applications.
- - -1. -Note that HDF and HDF5 are two different products. -HDF is a data format first developed in the 1980s and currently -in Release 4.x (HDF Release 4.x). -HDF5 is a new data format first released in Beta in 1998 and -designed to better meet the ever-increasing demands of scientific computing -and to take better advantage of the ever-increasing capabilities of -computing systems. -HDF5 is currently in Release 1.x (HDF5 Release 1.x). - - - -

(Return to TOC) - - -

Changes in the Current Release

-

A detailed list of changes in HDF5 between the current release and -the preceding major release can be found in the file -RELEASE.txt, -with a highlights summary in the document -"HDF5 Software Changes from Release to Release" -in the -HDF5 Application Developer's Guide. - - -

(Return to TOC) -


- - -

2. HDF5 File Organization and Data Model

-

HDF5 files are organized in a hierarchical structure, with two primary structures: groups and datasets. - -

    -
  • HDF5 group: a grouping structure containing instances of zero or more groups or datasets, together with supporting metadata. -
  • HDF5 dataset: a multidimensional array of data elements, together with supporting metadata.
- -

Working with groups and group members is similar in many ways to working with directories and files in UNIX. As with UNIX directories and files, objects in an HDF5 file are often described by giving their full (or absolute) path names. -

-
/ signifies the root group.
-
/foo signifies a member of the root group called foo.
-
/foo/zoo signifies a member of the group foo, which in turn is a member of the root group.
-
-

Any HDF5 group or dataset may have an associated attribute list. An HDF5 attribute is a user-defined HDF5 structure that provides extra information about an HDF5 object. Attributes are described in more detail below. - - -

(Return to TOC) - - -

HDF5 Groups

-

An HDF5 group is a structure containing zero or more HDF5 objects. A group has two parts: - -

    -
  • A group header, which contains a group name and a list of group attributes. -
  • A group symbol table, which is a list of the HDF5 objects that belong to the group.
- - -

(Return to TOC) - - -

HDF5 Datasets

-

A dataset is stored in a file in two parts: a header and a data array. -

The header contains information that is needed to interpret the array portion of the dataset, as well as metadata (or pointers to metadata) that describes or annotates the dataset. Header information includes the name of the object, its dimensionality, its number-type, information about how the data itself is stored on disk, and other information used by the library to speed up access to the dataset or maintain the file's integrity. -

There are four essential classes of information in any header: name, datatype, dataspace, and storage layout: -

Name. A dataset name is a sequence of alphanumeric ASCII characters. -

Datatype. HDF5 allows one to define many different kinds of datatypes. There are two categories of datatypes: atomic datatypes and compound datatypes. -Atomic datatypes can also be system-specific, or NATIVE, and all datatypes can be named: -

    -
  • Atomic datatypes are those that are not decomposed at the datatype interface level, such as integers and floats. -
  • NATIVE datatypes are system-specific instances of atomic datatypes. -
  • Compound datatypes are made up of atomic datatypes. -
  • Named datatypes are either atomic or compound datatypes that have been specifically designated to be shared across datasets. -
-

Atomic datatypes include integers and floating-point numbers. Each atomic type belongs to a particular class and has several properties: size, order, precision, and offset. In this introduction, we consider only a few of these properties. -

Atomic classes include integer, float, date and time, string, bit field, and opaque. (Note: Only integer, float and string classes are available in the current implementation.) -

Properties of integer types include size, order (endian-ness), and signed-ness (signed/unsigned). -

Properties of float types include the size and location of the exponent and mantissa, and the location of the sign bit. -

The datatypes that are supported in the current implementation are: - -

    -
  • Integer datatypes: 8-bit, 16-bit, 32-bit, and 64-bit integers in both little and big-endian format -
  • Floating-point numbers: IEEE 32-bit and 64-bit floating-point numbers in both little and big-endian format -
  • References -
  • Strings
- -

-NATIVE datatypes. Although it is possible to describe nearly any kind of atomic datatype, most applications will use predefined datatypes that are supported by their compiler. In HDF5 these are called native datatypes. NATIVE datatypes are C-like datatypes that are generally supported by the hardware of the machine on which the library was compiled. In order to be portable, applications should almost always use the NATIVE designation to describe data values in memory. -

The NATIVE architecture has base names which do not follow the same rules as the others. Instead, native type names are similar to the C type names. The following figure shows several examples. -

- -

-Examples of Native Datatypes and Corresponding C Types
    Example                 Corresponding C Type
    H5T_NATIVE_CHAR         signed char
    H5T_NATIVE_UCHAR        unsigned char
    H5T_NATIVE_SHORT        short
    H5T_NATIVE_USHORT       unsigned short
    H5T_NATIVE_INT          int
    H5T_NATIVE_UINT         unsigned
    H5T_NATIVE_LONG         long
    H5T_NATIVE_ULONG        unsigned long
    H5T_NATIVE_LLONG        long long
    H5T_NATIVE_ULLONG       unsigned long long
    H5T_NATIVE_FLOAT        float
    H5T_NATIVE_DOUBLE       double
    H5T_NATIVE_LDOUBLE      long double
    H5T_NATIVE_HSIZE        hsize_t
    H5T_NATIVE_HSSIZE       hssize_t
    H5T_NATIVE_HERR         herr_t
    H5T_NATIVE_HBOOL        hbool_t
- -

See Datatypes in the HDF User’s Guide for further information. - - -

A compound datatype is one in which a -collection of several datatypes are represented as a single unit, -a compound datatype, similar to a struct in C. -The parts of a compound datatype are called members. -The members of a compound datatype may be of any datatype, -including another compound datatype. It is possible to read members -from a compound type without reading the whole type. -
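For example, a compound datatype describing a C struct with an integer and a double member might be built as follows; the struct and member names are assumptions.

    typedef struct {
        int    serial;
        double temperature;
    } record_t;

    hid_t ctype = H5Tcreate(H5T_COMPOUND, sizeof(record_t));
    H5Tinsert(ctype, "serial",      HOFFSET(record_t, serial),      H5T_NATIVE_INT);
    H5Tinsert(ctype, "temperature", HOFFSET(record_t, temperature), H5T_NATIVE_DOUBLE);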

-

Named datatypes. Normally each dataset has its own datatype, but sometimes we may want to share a datatype among several datasets. This can be done using a named datatype. A named datatype is stored in the file independently of any dataset, and referenced by all datasets that have that datatype. Named datatypes may have an associated attributes list. -See Datatypes in the HDF User’s Guide for further information. -
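For example, a datatype can be committed to (stored in) the file under a name so that several datasets can reference it; a minimal sketch, assuming an open file identifier file and the ctype datatype built in the previous sketch:

    /* store the datatype in the file as an independent, named datatype */
    H5Tcommit(file, "/shared_record_type", ctype);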

Dataspace. A dataset dataspace describes the dimensionality of the dataset. The dimensions of a dataset can be fixed (unchanging), or they may be unlimited, which means that they are extendible (i.e. they can grow larger). -

Properties of a dataspace consist of the rank (number of dimensions) of the data array, the actual sizes of the dimensions of the array, and the maximum sizes of the dimensions of the array. For a fixed-dimension dataset, the actual size is the same as the maximum size of a dimension. When a dimension is unlimited, the maximum size is set to the value H5S_UNLIMITED. (An example below shows how to create extendible datasets.) -
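For instance, a dataspace whose first dimension can grow without bound might be created as follows (the current sizes are assumptions):

    hsize_t dims[2]    = {10, 5};
    hsize_t maxdims[2] = {H5S_UNLIMITED, 5};
    hid_t   space = H5Screate_simple(2, dims, maxdims);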

A dataspace can also describe portions of a dataset, making it possible to do partial I/O operations on selections. Selection is supported by the dataspace interface (H5S). Given an n-dimensional dataset, there are currently four ways to do partial selection: -

    - -
  1. Select a logically contiguous n-dimensional hyperslab. -
  2. Select a non-contiguous hyperslab consisting of elements or blocks of elements (hyperslabs) that are equally spaced. -
  3. Select a union of hyperslabs. -
  4. Select a list of independent points.
- -

Since I/O operations have two end-points, the raw data transfer functions require two dataspace arguments: one describes the application memory dataspace or subset thereof, and the other describes the file dataspace or subset thereof. -

See Dataspaces in the HDF User’s Guide for further information. -

Storage layout. The HDF5 format makes it possible to store data in a variety of ways. The default storage layout format is contiguous, meaning that data is stored in the same linear way that it is organized in memory. Two other storage layout formats are currently defined for HDF5: compact, and chunked. In the future, other storage layouts may be added. -

Compact storage is used when the amount of data is small and can be stored directly in the object header. (Note: Compact storage is not supported in this release.) -

Chunked storage involves dividing the dataset into equal-sized "chunks" that are stored separately. Chunking has three important benefits. -

    - -
  1. It makes it possible to achieve good performance when accessing subsets of the datasets, even when the subset to be chosen is orthogonal to the normal storage order of the dataset. -
  2. It makes it possible to compress large datasets and still achieve good performance when accessing subsets of the dataset. -
  3. It makes it possible efficiently to extend the dimensions of a dataset in any direction.
- -

-See Datasets and Dataset Chunking Issues in the HDF User’s Guide for further information. -We particularly encourage you to read Dataset Chunking Issues since the issue is complex and beyond the scope of this document. - - -
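For example, a dataset stored in 64x64 chunks might be created as follows; file and space stand for an open file and a two-dimensional dataspace, and all names are assumptions.

    hsize_t chunk_dims[2] = {64, 64};
    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk_dims);
    hid_t   dset = H5Dcreate(file, "chunked_data", H5T_NATIVE_INT, space, dcpl);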

(Return to TOC) - - -

HDF5 Attributes

-Attributes are small named datasets that are attached to primary datasets, groups, or named datatypes. Attributes can be used to describe the nature and/or the intended usage of a dataset or group. An attribute has two parts: (1) a name and (2) a value. The value part contains one or more data entries of the same datatype. -

The Attribute API (H5A) is used to read or write attribute information. When accessing attributes, they can be identified by name or by an index value. The use of an index value makes it possible to iterate through all of the attributes associated with a given object. -

The HDF5 format and I/O library are designed with the assumption that attributes are small datasets. They are always stored in the object header of the object they are attached to. Because of this, large datasets should not be stored as attributes. How large is "large" is not defined by the library and is up to the user's interpretation. (Large datasets with metadata can be stored as supplemental datasets in a group with the primary dataset.) -

See Attributes in the HDF User’s Guide for further information. - - - -

(Return to TOC) - - -

The File as Written to Media

- -

For those who are interested, this section takes a look at - the low-level elements of the file as the file is written to disk - (or other storage media) and the relation of those low-level - elements to the higher level elements with which users typically - are more familiar. The HDF5 API generally exposes only the - high-level elements to the user; the low-level elements are - often hidden. - The rest of this Introduction does not assume - an understanding of this material. - -

The format of an HDF5 file on disk encompasses several - key ideas of the HDF4 and AIO file formats as well as - addressing some shortcomings therein. The new format is - more self-describing than the HDF4 format and is more - uniformly applied to data objects in the file. - - - -
-
- HDF5 Groups -
 
- Figure 1: Relationships among the - HDF5 root group, other groups, and objects -
-
 
- - -

An HDF5 file appears to the user as a directed graph. - The nodes of this graph are the higher-level HDF5 objects - that are exposed by the HDF5 APIs: - -

    -
  • Groups -
  • Datasets -
  • Datatypes -
  • Dataspaces -
- -

At the lowest level, as information is actually written to the disk, - an HDF5 file is made up of the following objects: -

    -
  • A super block -
  • B-tree nodes (containing either symbol nodes or raw data chunks) -
  • Object headers - - - -
      -
    - HDF5 Objects -
      - Figure 2: HDF5 objects -- datasets, datatypes, or dataspaces -
    -
    - -
  • Collections -
  • Local heaps -
  • Free space -
- - The HDF5 library uses these lower-level objects to represent the - higher-level objects that are then presented to the user or - to applications through the APIs. - For instance, a group is an object header that contains a message that - points to a local heap and to a B-tree which points to symbol nodes. - A dataset is an object header that contains messages that describe - datatype, space, layout, filters, external files, fill value, etc - with the layout message pointing to either a raw data chunk or to a - B-tree that points to raw data chunks. - -

See the HDF5 File Format - Specification for further information. - - -

(Return to TOC) -


- - -

3. The HDF5 Applications Programming Interface (API)

-

The current HDF5 API is implemented only in C. The API provides routines for creating HDF5 files, creating and writing groups, datasets, and their attributes to HDF5 files, and reading groups, datasets and their attributes from HDF5 files. -

Naming conventions

-

All C routines in the HDF5 library begin with a prefix of the form H5*, where * is a single letter indicating the object on which the operation is to be performed: -

    -
  • H5F: File-level access routines.
    -Example: H5Fopen, which opens an HDF5 file. -
  • H5G: Group functions, for creating and operating on groups of objects.
    -Example: H5Gset, which sets the working group to the specified group. -
  • H5T: DataType functions, for creating and operating on simple and compound datatypes to be used as the elements in data arrays.
    -
    Example: H5Tcopy,which creates a copy of an existing datatype. -
  • H5S: DataSpace functions, which create and manipulate the dataspace in which the elements of a data array are stored.
    -Example: H5Screate_simple, which creates simple dataspaces. -
  • H5D: Dataset functions, which manipulate the data within datasets and determine how the data is to be stored in the file.
    -Example: H5Dread, which reads all or part of a dataset into a buffer in memory. -
  • H5P: Property list functions, for manipulating object creation and access properties.
    -Example: H5Pset_chunk, which sets the number of dimensions and the size of a chunk. -
  • H5A: Attribute access and manipulating routines.
    -Example: H5Aget_name, which retrieves name of an attribute. -
  • H5Z: Compression registration routine.
    -Example: H5Zregister, which registers new compression and uncompression functions for use with the HDF5 library. -
  • H5E: Error handling routines.
    -Example: H5Eprint, which prints the current error stack. -
  • H5R: Reference routines.
    -Example: H5Rcreate, which creates a reference. -
  • H5I: Identifier routine.
    -Example: H5Iget_type, which retrieves the type of an object.
- - -

(Return to TOC) - - -

Include Files

-

There are a number of definitions and declarations that should be included with any HDF5 program. These definitions and declarations are contained in several include files. The main include file is hdf5.h. This file includes all of the other files that your program is likely to need. Be sure to include hdf5.h in any program that uses the HDF5 library. -

(Return to TOC) - - -

Programming Models

-

In this section we describe how to program some basic operations on files, including how to - -

    -
  • Create a file. -
  • Create and initialize a dataset. -
  • Discard objects when they are no longer needed. -
  • Write a dataset to a new file. -
  • Obtain information about a dataset. -
  • Read a portion of a dataset. -
  • Create and write compound datatypes. -
  • Create and write extendible datasets. -
  • Create and populate groups. -
  • Work with attributes.
- - -

(Return to TOC) - - -

How to create an HDF5 file

-

This programming model shows how to create a file and also how to close the file. -

    - -
  1. Create the file. -
  2. Close the file. -
- -

The following code fragment implements the specified model. If there is a possibility that the file already exists, the user must add the flag H5F_ACC_TRUNC to the access mode to overwrite the previous file's information. -

hid_t       file;                          /* identifier */
-/*
-* Create a new file using H5F_ACC_TRUNC access,
-* default file creation properties, and default file
-* access properties.
-* Then close the file.
-*/
-file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-status = H5Fclose(file); 
-
-
 
-
- - -

(Return to TOC) - - -

How to create and initialize the essential components of a dataset for writing to a file

-

Recall that datatypes and dimensionality (dataspace) are independent objects, which are created separately from any dataset that they might be attached to. Because of this the creation of a dataset requires, at a minimum, separate definitions of datatype, dimensionality, and dataset. Hence, to create a dataset the following steps need to be taken: -

    -
  1. Create and initialize a dataspace for the dataset to be written. -
  2. Define the datatype for the dataset to be written. -
  3. Create and initialize the dataset itself.
- -

The following code illustrates the creation of these three components of a dataset object. -

hid_t    dataset, datatype, dataspace;   /* declare identifiers */
-
-/* 
- * Create dataspace: Describe the size of the array and 
- * create the data space for fixed size dataset. 
- */
-dimsf[0] = NX;
-dimsf[1] = NY;
-dataspace = H5Screate_simple(RANK, dimsf, NULL); 
-/*
- * Define datatype for the data in the file.
- * We will store little endian integer numbers.
- */
-datatype = H5Tcopy(H5T_NATIVE_INT);
-status = H5Tset_order(datatype, H5T_ORDER_LE);
-/*
- * Create a new dataset within the file using defined 
- * dataspace and datatype and default dataset creation
- * properties.
- * NOTE: H5T_NATIVE_INT can be used as datatype if conversion
- * to little endian is not needed.
- */
-dataset = H5Dcreate(file, DATASETNAME, datatype, dataspace, H5P_DEFAULT);
- - - -

(Return to TOC) - - -

How to discard objects when they are no longer needed

-

The datatype, dataspace and dataset objects should be released once they are no longer needed by a program. Since each is an independent object, they must be released (or closed) separately. The following lines of code close the datatype, dataspace, and datasets that were created in the preceding section. -

H5Tclose(datatype); -

H5Dclose(dataset); -

H5Sclose(dataspace); - - - -

(Return to TOC) - - -

How to write a dataset to a new file

-

Having defined the datatype, dataset, and dataspace parameters, you write out the data with a call to H5Dwrite. -

/*
-* Write the data to the dataset using default transfer
-* properties.
-*/
-status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
-	  H5P_DEFAULT, data);
-

The third and fourth parameters of H5Dwrite in the example describe the dataspaces in memory and in the file, respectively. They are set to the value H5S_ALL to indicate that an entire dataset is to be written. In a later section we look at how we would access a portion of a dataset. -

Example 1 contains a program that creates a file and a dataset, and writes the dataset to the file. -

Reading is analogous to writing. If, in the previous example, we wish to read an entire dataset, we would use the same basic calls with the same parameters. Of course, the routine H5Dread would replace H5Dwrite. - - - -

(Return to TOC) - - -

Getting information about a dataset

-

Although reading is analogous to writing, it is often necessary to query a file to obtain information about a dataset. For instance, we often need to know about the datatype associated with a dataset, as well as dataspace information (e.g. rank and dimensions). There are several "get" routines for obtaining this information. The following code segment illustrates how we would get this kind of information: -

/*
-* Get datatype and dataspace identifiers and then query
-* dataset class, order, size, rank and dimensions.
-*/
-
-datatype  = H5Dget_type(dataset);     /* datatype identifier */ 
-class     = H5Tget_class(datatype);
-if (class == H5T_INTEGER) printf("Data set has INTEGER type \n");
-order     = H5Tget_order(datatype);
-if (order == H5T_ORDER_LE) printf("Little endian order \n");
-
-size  = H5Tget_size(datatype);
-printf(" Data size is %d \n", size);
-
-dataspace = H5Dget_space(dataset);    /* dataspace identifier */
-rank      = H5Sget_simple_extent_ndims(dataspace);
-status_n  = H5Sget_simple_extent_dims(dataspace, dims_out);
-printf("rank %d, dimensions %d x %d \n", rank, dims_out[0], dims_out[1]);
- - - -

(Return to TOC) - - -

Reading and writing a portion of a dataset

-

In the previous discussion, we describe how to access an entire dataset with one write (or read) operation. HDF5 also supports access to portions (or selections) of a dataset in one read/write operation. Currently selections are limited to hyperslabs, their unions, and the lists of independent points. Both types of selection will be discussed in the following sections. Several sample cases of selection reading/writing are shown on the following figure. -

[Figure: four sample selection cases, labeled (a) through (d), corresponding to the cases described in the next paragraph.]

In example (a) a single hyperslab is read from the midst of a two-dimensional array in a file and stored in the corner of a smaller two-dimensional array in memory. In (b) a regular series of blocks is read from a two-dimensional array in the file and stored as a contiguous sequence of values at a certain offset in a one-dimensional array in memory. In (c) a sequence of points with no regular pattern is read from a two-dimensional array in a file and stored as a sequence of points with no regular pattern in a three-dimensional array in memory. -In (d) a union of hyperslabs in the file dataspace is read and -the data is stored in another union of hyperslabs in the memory dataspace. -

As these examples illustrate, whenever we perform partial read/write operations on the data, the following information must be provided: file dataspace, file dataspace selection, memory dataspace and memory dataspace selection. After the required information is specified, actual read/write operation on the portion of data is done in a single call to the HDF5 read/write functions H5Dread(write). - - -

Selecting hyperslabs
-

Hyperslabs are portions of datasets. A hyperslab selection can be a logically contiguous collection of points in a dataspace, or it can be regular pattern of points or blocks in a dataspace. The following picture illustrates a selection of regularly spaced 3x2 blocks in an 8x12 dataspace. -

- -

-Hyperslab selection
(X marks a selected element, . marks an unselected element; the dataspace has 8 rows and 12 columns)

    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  .  .  .  .  .  .  .  .  .  .  .
    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  .  .  .  .  .  .  .  .  .  .  .
- -

Four parameters are required to describe a completely general hyperslab. Each parameter is an array whose rank is the same as that of the dataspace: - -

    -
  • start: a starting location for the hyperslab. In the example start is (0,1). -
  • stride: the number of elements to separate each element or block to be selected. In the example stride is (4,3). If the stride parameter is set to NULL, the stride size defaults to 1 in each dimension. -
  • count: the number of elements or blocks to select along each dimension. In the example, count is (2,4). -
  • block: the size of the block selected from the dataspace. In the example, block is (3,2). If the block parameter is set to NULL, the block size defaults to a single element in each dimension, as if the block array was set to all 1s.
- -

In what order is data copied? When actual I/O is performed data values are copied by default from one dataspace to another in so-called row-major, or C order. That is, it is assumed that the first dimension varies slowest, the second next slowest, and so forth. -

Example without strides or blocks. Suppose we want to read a 3x4 hyperslab from a dataset in a file beginning at the element <1,2> in the dataset. In order to do this, we must create a dataspace that describes the overall rank and dimensions of the dataset in the file, as well as the position and size of the hyperslab that we are extracting from that dataset. The following code illustrates the selection of the hyperslab in the file dataspace. -

/* 
 * Define file dataspace.
 */
dataspace = H5Dget_space(dataset);    /* dataspace identifier */
rank      = H5Sget_simple_extent_ndims(dataspace);
status_n  = H5Sget_simple_extent_dims(dataspace, dims_out, NULL);

/* 
 * Define hyperslab in the dataset. 
 */
offset[0] = 1;
offset[1] = 2;
count[0]  = 3;
count[1]  = 4;
status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, 
         count, NULL);

This describes the dataspace from which we wish to read. We need to define the dataspace in memory analogously. Suppose, for instance, that we have in memory a 3 dimensional 7x7x3 array into which we wish to read the 3x4 hyperslab described above beginning at the element <3,0,0>. Since the in-memory dataspace has three dimensions, we have to describe the hyperslab as an array with three dimensions, with the last dimension being 1: <3,4,1>. -

Notice that we must describe two things: the dimensions of the in-memory array, and the size and position of the hyperslab that we wish to read in. The following code illustrates how this would be done. -

/*
 * Define memory dataspace.
 */
dimsm[0] = 7;
dimsm[1] = 7;
dimsm[2] = 3;
memspace = H5Screate_simple(RANK_OUT, dimsm, NULL);

/* 
 * Define memory hyperslab. 
 */
offset_out[0] = 3;
offset_out[1] = 0;
offset_out[2] = 0;
count_out[0]  = 3;
count_out[1]  = 4;
count_out[2]  = 1;
status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, 
         count_out, NULL);

Example 2 contains a complete program that performs these operations. -

Example with strides and blocks. Consider the 8x12 dataspace described above, in which we selected eight 3x2 blocks. Suppose we wish to fill these eight blocks. -

Hyperslab selection -- the same eight 3x2 blocks in the 8x12 dataspace
(selected elements are marked X; unselected elements are marked .):

    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  .  .  .  .  .  .  .  .  .  .  .
    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  X  X  .  X  X  .  X  X  .  X  X
    .  .  .  .  .  .  .  .  .  .  .  .

This hyperslab has the following parameters: start=(0,1), stride=(4,3), count=(2,4), block=(3,2). -

Suppose that the source dataspace in memory is this 50-element one dimensional array called vector: -

A 50-element one-dimensional array:

    -1  1  2  3  4  5  6  7  ...  47  48  -1

The following code will write 48 elements from vector to our file dataset, starting with the second element in vector. -

/* Select hyperslab for the dataset in the file, using 3x2 blocks, (4,3) stride,
 * (2,4) count, starting at the position (0,1).
 */
start[0]  = 0; start[1]  = 1;
stride[0] = 4; stride[1] = 3;
count[0]  = 2; count[1]  = 4;    
block[0]  = 3; block[1]  = 2;
ret = H5Sselect_hyperslab(fid, H5S_SELECT_SET, start, stride, count, block);

/*
 * Create dataspace for the first dataset.
 */
mid1 = H5Screate_simple(MSPACE1_RANK, dim1, NULL);

/*
 * Select hyperslab. 
 * We will use 48 elements of the vector buffer starting at the second element.
 * Selected elements are 1 2 3 . . . 48
 */
start[0]  = 1;
stride[0] = 1;
count[0]  = 48;
block[0]  = 1;
ret = H5Sselect_hyperslab(mid1, H5S_SELECT_SET, start, stride, count, block);
 
/*
 * Write selection from the vector buffer to the dataset in the file.
 */
ret = H5Dwrite(dataset, H5T_NATIVE_INT, mid1, fid, H5P_DEFAULT, vector);

  -

After these operations, the file dataspace will have the following values. -

Hyperslab selection with assigned values
(elements outside the selection are marked . and contain the fill value 0):

    .   1   2   .   3   4   .   5   6   .   7   8
    .   9  10   .  11  12   .  13  14   .  15  16
    .  17  18   .  19  20   .  21  22   .  23  24
    .   .   .   .   .   .   .   .   .   .   .   .
    .  25  26   .  27  28   .  29  30   .  31  32
    .  33  34   .  35  36   .  37  38   .  39  40
    .  41  42   .  43  44   .  45  46   .  47  48
    .   .   .   .   .   .   .   .   .   .   .   .

Notice that the values are inserted in the file dataset in row-major order. -

Example 3 includes this code and other example code illustrating the use of hyperslab selection. - - -

Selecting a list of independent points
-A hyperslab specifies a regular pattern of elements in a dataset. It is also possible to specify a list of independent elements to read or write using the function H5Sselect_elements. Suppose, for example, that we wish to write the values 53, 59, 61, 67 to the following elements of the 8x12 array used in the previous example: (0,0), (3,3), (3,5), and (5,6). The following code selects the points and writes them to the dataset: -
-#define FSPACE_RANK      2    /* Dataset rank as it is stored in the file */
-#define NPOINTS          4    /* Number of points that will be selected 
-                                 and overwritten */ 
-#define MSPACE2_RANK     1    /* Rank of the second dataset in memory */ 
-#define MSPACE2_DIM      4    /* Dataset size in memory */ 
-
- 
-hsize_t dim2[] = {MSPACE2_DIM};       /* Dimension size of the second 
-                                         dataset (in memory) */ 
-int     values[] = {53, 59, 61, 67};  /* New values to be written */
-hsize_t coord[NPOINTS][FSPACE_RANK];  /* Array to store selected points 
-                                         from the file dataspace */ 
-
-/*
- * Create dataspace for the second dataset.
- */
-mid2 = H5Screate_simple(MSPACE2_RANK, dim2, NULL);
-
-/*
- * Select sequence of NPOINTS points in the file dataspace.
- */
-coord[0][0] = 0; coord[0][1] = 0;
-coord[1][0] = 3; coord[1][1] = 3;
-coord[2][0] = 3; coord[2][1] = 5;
-coord[3][0] = 5; coord[3][1] = 6;
-
-ret = H5Sselect_elements(fid, H5S_SELECT_SET, NPOINTS, 
-                         (const hsize_t **)coord);
-
-/*
- * Write new selection of points to the dataset.
- */
-ret = H5Dwrite(dataset, H5T_NATIVE_INT, mid2, fid, H5P_DEFAULT, values);   
-
- -

  -

After these operations, the file dataspace will have the following values: -

Hyperslab selection with an overlay of independent points
(elements outside the selections are marked .):

   53   1   2   .   3   4   .   5   6   .   7   8
    .   9  10   .  11  12   .  13  14   .  15  16
    .  17  18   .  19  20   .  21  22   .  23  24
    .   .   .  59   .  61   .   .   .   .   .   .
    .  25  26   .  27  28   .  29  30   .  31  32
    .  33  34   .  35  36  67  37  38   .  39  40
    .  41  42   .  43  44   .  45  46   .  47  48
    .   .   .   .   .   .   .   .   .   .   .   .

Example 3 contains a complete program that performs these subsetting operations. - - -

Selecting a union of hyperslabs
The HDF5 Library allows the user to select a union of hyperslabs and write or read the selection into another selection. The shapes of the two selections may differ, but the number of elements must be equal.

Suppose that we want to read two overlapping hyperslabs from the dataset written in the previous example into a union of hyperslabs in the memory dataset. This exercise is illustrated in the two figures immediately below. Note that the memory dataset has a different shape from the previously written dataset. Similarly, the selection in the memory dataset could have a different shape than the selected union of hyperslabs in the original file; for simplicity, we will preserve the selection's shape in this example.

Selection of a union of hyperslabs in a file dataset
(the first selected hyperslab is the 3x4 block with its upper left corner at (1,2);
the second is the 6x5 block at (2,4); in the original figure these selections and
their overlap were highlighted with green, blue, and yellow shading):

   53   1   2   .   3   4   .   5   6   .   7   8
    .   9  10   .  11  12   .  13  14   .  15  16
    .  17  18   .  19  20   .  21  22   .  23  24
    .   .   .  59   .  61   .   .   .   .   .   .
    .  25  26   .  27  28   .  29  30   .  31  32
    .  33  34   .  35  36  67  37  38   .  39  40
    .  41  42   .  43  44   .  45  46   .  47  48
    .   .   .   .   .   .   .   .   .   .   .   .

Selection of a union of hyperslabs in a memory dataset
(blank cells, marked . below, actually contain values written when the dataset
was initialized; in the original figure the selected hyperslabs and their overlap
were highlighted with green, blue, and yellow shading):

   10   .  11  12   .   .   .   .   .
   18   .  19  20   .  21  22   .   .
    .  59   .  61   .   .   .   .   .
    .   .  27  28   .  29  30   .   .
    .   .  35  36  67  37  38   .   .
    .   .  43  44   .  45  46   .   .
    .   .   .   .   .   .   .   .   .
    .   .   .   .   .   .   .   .   .

-The following lines of code show the required steps. -

-First obtain the dataspace identifier for the dataset in the file. - -

    /*
     * Get dataspace of the open dataset.
     */  
    fid = H5Dget_space(dataset);
-
Then select the hyperslab with the size 3x4 and the upper left corner at the position (1,2):
    /*
     * Select first hyperslab for the dataset in the file. The following
     * elements are selected:
     *                     10  0 11 12
     *                     18  0 19 20
     *                      0 59  0 61
     *   
     */ 
    start[0] = 1; start[1] = 2;
    block[0] = 1; block[1] = 1;
    stride[0] = 1; stride[1] = 1;
    count[0]  = 3; count[1]  = 4;
    ret = H5Sselect_hyperslab(fid, H5S_SELECT_SET, start, stride, count, block); 
- -Now select the second hyperslab with the size 6x5 at the position (2,4), -and create the union with the first hyperslab. - -
    /*
     * Add second selected hyperslab to the selection.
     * The following elements are selected:
     *                    19 20  0 21 22
     *                     0 61  0  0  0
     *                    27 28  0 29 30
     *                    35 36 67 37 38
     *                    43 44  0 45 46
     *                     0  0  0  0  0
     * Note that two hyperslabs overlap. Common elements are:
     *                                              19 20
     *                                               0 61
     */
    start[0] = 2; start[1] = 4;
    block[0] = 1; block[1] = 1;
    stride[0] = 1; stride[1] = 1;
    count[0]  = 6; count[1]  = 5;
    ret = H5Sselect_hyperslab(fid, H5S_SELECT_OR, start, stride, count, block);
- -Note that when we add the selected hyperslab to the union, the -second argument to the H5Sselect_hyperslab function -has to be H5S_SELECT_OR instead of H5S_SELECT_SET. -Using H5S_SELECT_SET would reset the selection to -the second hyperslab. -

-Now define the memory dataspace and select the union of the hyperslabs -in the memory dataset. - -

    /*
     * Create memory dataspace.
     */
    mid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
     
    /*
     * Select two hyperslabs in memory. The hyperslabs have the same
     * size and shape as the selected hyperslabs for the file dataspace.
     */
    start[0] = 0; start[1] = 0;
    block[0] = 1; block[1] = 1;
    stride[0] = 1; stride[1] = 1;
    count[0]  = 3; count[1]  = 4;
    ret = H5Sselect_hyperslab(mid, H5S_SELECT_SET, start, stride, count, block);     
    start[0] = 1; start[1] = 2;
    block[0] = 1; block[1] = 1;
    stride[0] = 1; stride[1] = 1;
    count[0]  = 6; count[1]  = 5;
    ret = H5Sselect_hyperslab(mid, H5S_SELECT_OR, start, stride, count, block);
- -Finally we can read the selected data from the file dataspace to the selection -in memory with one call to the H5Dread function. - -
    ret = H5Dread(dataset, H5T_NATIVE_INT, mid, fid, H5P_DEFAULT, matrix_out);
-
- -

-Example 3 -includes this code along with the previous selection example. - - - - -

(Return to TOC) - - -

Creating variable-length datatypes

Variable-length (VL) datatypes are sequences of an existing datatype (atomic, VL, or compound) which are not fixed in length from one dataset location to another. In essence, they are similar to C character strings -- a sequence of a type which is pointed to by a particular type of pointer -- although they are implemented more like FORTRAN strings, carrying an explicit length alongside the pointer instead of using a particular value to terminate the sequence.
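In the C API this length-plus-pointer representation is the hvl_t structure; in essence it looks like the following (a simplified sketch of the definition in H5Tpublic.h):

typedef struct {
    size_t len;    /* length of the sequence, in base-datatype elements */
    void  *p;      /* pointer to the sequence data                      */
} hvl_t;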

-VL datatypes are useful to the scientific community in many different ways, -some of which are listed below: -

    -
  • Ragged arrays: Multi-dimensional ragged arrays can be implemented with - the last (fastest changing) dimension being ragged by using a - VL datatype as the type of the element stored. (Or as a field in a - compound datatype.) -
  • Fractal arrays: If a compound datatype has a VL field of another compound - type with VL fields (a nested VL datatype), this can be used to - implement ragged arrays of ragged arrays, to whatever nesting depth is - required for the user. -
  • Polygon lists: A common storage requirement is to efficiently store arrays - of polygons with different numbers of vertices. VL datatypes can be - used to efficiently and succinctly describe an array of polygons with - different numbers of vertices. -
  • Character strings: Perhaps the most common use of VL datatypes will be to - store C-like VL character strings in dataset elements or as attributes - of objects. -
  • Indices: An array of VL object references could be used as an index to - all the objects in a file which contain a particular sequence of - dataset values. Perhaps an array something like the following: -
    -            Value1: Object1, Object3,  Object9
    -            Value2: Object0, Object12, Object14, Object21, Object22
    -            Value3: Object2
    -            Value4: <none>
    -            Value5: Object1, Object10, Object12
    -                .
    -                .
    -        
    -
  • Object Tracking: An array of VL dataset region references can be used as - a method of tracking objects or features appearing in a sequence of - datasets. Perhaps an array of them would look like: -
    -            Feature1: Dataset1:Region,  Dataset3:Region,  Dataset9:Region
    -            Feature2: Dataset0:Region,  Dataset12:Region, Dataset14:Region,
    -                      Dataset21:Region, Dataset22:Region
    -            Feature3: Dataset2:Region
    -            Feature4: <none>
    -            Feature5: Dataset1:Region,  Dataset10:Region, Dataset12:Region
    -                .
    -                .
    -        
    -
- - -

Variable-length datatype memory management

- -With each element possibly being of different sequence lengths for a -dataset with a VL datatype, the memory for the VL datatype must be dynamically -allocated. Currently there are two methods of managing the memory for -VL datatypes: the standard C malloc/free memory allocation routines or a method -of calling user-defined memory management routines to allocate or free memory. -Since the memory allocated when reading (or writing) may be complicated to -release, an HDF5 routine is provided to traverse a memory buffer and free the -VL datatype information without leaking memory. - - -
Variable-length datatypes cannot be divided
- -VL datatypes are designed so that they cannot be subdivided by the library -with selections, etc. This design was chosen due to the complexities in -specifying selections on each VL element of a dataset through a selection API -that is easy to understand. Also, the selection APIs work on dataspaces, not -on datatypes. At some point in time, we may want to create a way for -dataspaces to have VL components to them and we would need to allow selections -of those VL regions, but that is beyond the scope of this document. - - -
What happens if the library runs out of memory while reading?
- -It is possible for a call to H5Dread to fail while reading in -VL datatype information if the memory required exceeds that which is available. -In this case, the H5Dread call will fail gracefully and any -VL data which has been allocated prior to the memory shortage will be returned -to the system via the memory management routines detailed below. -It may be possible to design a partial read API function at a -later date, if demand for such a function warrants. - - -
Strings as variable-length datatypes
- -Since character strings are a special case of VL data that is implemented -in many different ways on different machines and in different programming -languages, they are handled somewhat differently from other VL datatypes in HDF5. - -

-HDF5 has native VL strings for each language API, which are stored the -same way on disk, but are exported through each language API in a natural way -for that language. When retrieving VL strings from a dataset, users may choose -to have them stored in memory as a native VL string or in HDF5's hvl_t -struct for VL datatypes. - -

-VL strings may be created in one of two ways: by creating a VL datatype with -a base type of H5T_NATIVE_ASCII, H5T_NATIVE_UNICODE, -etc., or by creating a string datatype and setting its length to -H5T_VARIABLE. The second method is used to access -native VL strings in memory. The library will convert between the two types, -but they are stored on disk using different datatypes and have different -memory representations. - -
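As a sketch of the second method, a native variable-length C string datatype can be obtained by copying the predefined C string type and setting its size to H5T_VARIABLE (identifier names here are illustrative):

/* Build a variable-length C-style string datatype. */
hid_t  str_tid = H5Tcopy(H5T_C_S1);
herr_t status  = H5Tset_size(str_tid, H5T_VARIABLE);

/* Elements of this type are read and written as ordinary
 * NUL-terminated char* pointers, e.g. char *data[2] = {"foo", "bar"};
 */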

-Multi-byte character representations, such as UNICODE or wide -characters in C/C++, will need the appropriate character and string datatypes -created so that they can be described properly through the datatype API. -Additional conversions between these types and the current ASCII characters -will also be required. - -

-Variable-width character strings (which might be compressed data or some -other encoding) are not currently handled by this design. We will evaluate -how to implement them based on user feedback. - - -

Variable-length datatype APIs

- -
Creation
- -VL datatypes are created with the H5Tvlen_create() function -as follows: -
-
type_id = H5Tvlen_create(hid_t base_type_id); -
- -

-The base datatype will be the datatype that the sequence is composed of, -characters for character strings, vertex coordinates for polygon lists, etc. -The base datatype specified for the VL datatype can be of any HDF5 datatype, -including another VL datatype, a compound datatype, or an atomic datatype. - - -
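For example, a minimal sketch of creating a VL datatype of integers and writing two elements of different lengths might look like this (buffer contents and identifier names are illustrative, and the dataset is assumed to have been created with this VL datatype):

int    a[3] = {1, 2, 3};
int    b[5] = {10, 20, 30, 40, 50};
hvl_t  wdata[2];                       /* two VL elements                */
hid_t  vltid;
herr_t status;

wdata[0].len = 3;  wdata[0].p = a;     /* first element: 3 integers      */
wdata[1].len = 5;  wdata[1].p = b;     /* second element: 5 integers     */

vltid  = H5Tvlen_create(H5T_NATIVE_INT);
status = H5Dwrite(dataset, vltid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);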

Querying base datatype of VL datatype
- -It may be necessary to know the base datatype of a VL datatype before -memory is allocated, etc. The base datatype is queried with the -H5Tget_super() function, described in the H5T documentation. - - -
Querying minimum memory required for VL information

In order to predict the memory usage that H5Dread may need to allocate to store VL data while reading the data, the H5Dvlen_get_buf_size() function is provided:

    herr_t H5Dvlen_get_buf_size(hid_t dataset_id,
                                hid_t type_id,
                                hid_t space_id,
                                hsize_t *size)

This routine checks the number of bytes required to store the VL data from the dataset, using the space_id for the selection in the dataset on disk and the type_id for the memory representation of the VL data in memory. The *size value is modified according to how many bytes are required to store the VL data in memory.
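A short usage sketch (names carried over from the sketch above; the whole dataset is selected here):

hid_t   fspace = H5Dget_space(dataset);   /* selection: the whole dataset */
hsize_t vl_size;

status = H5Dvlen_get_buf_size(dataset, vltid, fspace, &vl_size);
/* vl_size now holds the number of bytes needed to hold the VL data */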

Specifying how to manage memory for the VL datatype
- -The memory management method is determined by dataset transfer properties -passed into the H5Dread and H5Dwrite functions -with the dataset transfer property list. - -

-Default memory management is set by using H5P_DEFAULT -for the dataset transfer property list identifier. -If H5P_DEFAULT is used with H5Dread, -the system malloc and free calls -will be used for allocating and freeing memory. -In such a case, H5P_DEFAULT should also be passed -as the property list identifier to H5Dvlen_reclaim. - -

-The rest of this subsection is relevant only to those who choose -not to use default memory management. - -

-The user can choose whether to use the -system malloc and free calls or -user-defined, or custom, memory management functions. -If user-defined memory management functions are to be used, -the memory allocation and free routines must be defined via -H5Pset_vlen_mem_manager(), as follows: -

-
    herr_t H5Pset_vlen_mem_manager(hid_t plist_id,
                                   H5MM_allocate_t alloc,
                                   void *alloc_info,
                                   H5MM_free_t free,
                                   void *free_info)
- - -

The alloc and free parameters identify the memory management routines to be used. If the user has defined custom memory management routines, alloc and/or free should be set to make those routine calls (i.e., the name of the routine is used as the value of the parameter); if the user prefers to use the system's malloc and/or free, the alloc and free parameters, respectively, should be set to NULL.

-The prototypes for the user-defined functions would appear as follows: -

-
    typedef void *(*H5MM_allocate_t)(size_t size, void *info);
    typedef void  (*H5MM_free_t)(void *mem, void *free_info);
- -

-The alloc_info and free_info parameters can be -used to pass along any required information to the user's memory management -routines. - -

-In summary, if the user has defined custom memory management -routines, the name(s) of the routines are passed in the -alloc and free parameters and the -custom routines' parameters are passed in the -alloc_info and free_info parameters. -If the user wishes to use the system malloc and -free functions, the alloc and/or -free parameters are set to NULL -and the alloc_info and free_info -parameters are ignored. - -
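The following sketch shows what such custom routines and their registration might look like; the wrapper functions and the byte counter passed through alloc_info/free_info are purely illustrative:

#include <stdlib.h>

/* Illustrative wrappers around malloc/free that count allocated bytes. */
void *my_vlen_alloc(size_t size, void *info)
{
    size_t *total = (size_t *)info;
    *total += size;
    return malloc(size);
}

void my_vlen_free(void *mem, void *info)
{
    free(mem);
}

/* ... later, when setting up the I/O ... */
size_t total      = 0;
hid_t  xfer_plist = H5Pcreate(H5P_DATASET_XFER);
status = H5Pset_vlen_mem_manager(xfer_plist, my_vlen_alloc, &total,
                                 my_vlen_free, &total);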

Recovering memory from VL buffers read in
- -The complex memory buffers created for a VL datatype may be reclaimed with -the H5Dvlen_reclaim() function call, as follows: -
-
    herr_t H5Dvlen_reclaim(hid_t type_id,
                           hid_t space_id,
                           hid_t plist_id,
                           void *buf);
- -

-The type_id must be the datatype stored in the buffer, -space_id describes the selection for the memory buffer -to free the VL datatypes within, -plist_id is the dataset transfer property list which -was used for the I/O transfer to create the buffer, and -buf is the pointer to the buffer to free the VL memory within. -The VL structures (hvl_t) in the user's buffer are -modified to zero out the VL information after it has been freed. - -

-If nested VL datatypes were used to create the buffer, -this routine frees them from the bottom up, -releasing all the memory without creating memory leaks. - -
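For example, with default memory management the read-then-reclaim sequence might look like this (a sketch; names are carried over from the earlier sketches):

hid_t  fspace = H5Dget_space(dataset);
hvl_t  rdata[2];                       /* filled in by H5Dread           */

status = H5Dread(dataset, vltid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
/* ... work with rdata[i].len and rdata[i].p ... */

/* Free the sequence buffers the library allocated during the read,
 * passing the same transfer property list (here H5P_DEFAULT).
 */
status = H5Dvlen_reclaim(vltid, fspace, H5P_DEFAULT, rdata);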

-Example 4 -creates a dataset with the variable-length datatype using user-defined -functions for memory management. - - -

(Return to TOC) - - -

Creating array datatypes

- -The array class of datatypes, H5T_ARRAY, allows the -construction of true, homogeneous, multi-dimensional arrays. -Since these are homogeneous arrays, each element of the array will be -of the same datatype, designated at the time the array is created. - -

-Arrays can be nested. -Not only is an array datatype used as an element of an HDF5 dataset, -but the elements of an array datatype may be of any datatype, -including another array datatype. - -

-Array datatypes cannot be subdivided for I/O; the entire array must -be transferred from one dataset to another. - -

-Within certain limitations, outlined in the next paragraph, array datatypes -may be N-dimensional and of any dimension size. -Unlimited dimensions, however, are not supported. -Functionality similar to unlimited dimension arrays is available through -the use of variable-length datatypes. - -

-The maximum number of dimensions, i.e., the maximum rank, of an array -datatype is specified by the HDF5 library constant H5S_MAX_RANK. -The minimum rank is 1 (one). -All dimension sizes must be greater than 0 (zero). - -

An array datatype may only be converted to another array datatype if the number of dimensions and the sizes of the dimensions are equal and the datatype of the first array's elements can be converted to the datatype of the second array's elements.

Array datatype APIs

- -There are three functions that are specific to array datatypes: -one, H5Tarray_create, for creating an array datatype, -and two, H5Tget_array_ndims and H5Tget_array_dims, -for working with existing array datatypes. - -
Creating
- -The function H5Tarray_create creates a new array datatype object. -Parameters specify -
    -
  • the base datatype of each element of the array, -
  • the rank of the array, i.e., the number of dimensions, -
  • the size of each dimension, and -
  • the dimension permutation of the array, i.e., whether the - elements of the array are listed in C or FORTRAN order. - (Note: The permutation feature is not implemented in Release 1.4.) -
    hid_t H5Tarray_create(hid_t base,
                          int rank,
                          const hsize_t dims[/*rank*/],
                          const int perm[/*rank*/])
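For example, a 2x3 array of native integers might be created as follows (a sketch; the permutation argument is passed as NULL since the feature is not implemented):

hsize_t adims[2] = {2, 3};    /* dimension sizes of each array element */
hid_t   atid;

atid = H5Tarray_create(H5T_NATIVE_INT, 2, adims, NULL);
/* atid can now be used as the datatype of a dataset or of a compound
 * datatype member.
 */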
Working with existing array datatypes
When working with existing arrays, one must first determine the rank, or number of dimensions, of the array.

-The function H5Tget_array_ndims returns the rank of a -specified array datatype. - -

    int H5Tget_array_ndims(hid_t adtype_id)

In many instances, one needs further information. The function H5Tget_array_dims retrieves the permutation of the array and the size of each dimension. (Note: The permutation feature is not implemented in Release 1.4.)

    int H5Tget_array_dims(hid_t adtype_id,
                          hsize_t *dims[],
                          int *perm[])
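A short sketch of querying an existing array datatype (atid as created above):

int     ndims;
hsize_t adims_out[H5S_MAX_RANK];

ndims = H5Tget_array_ndims(atid);              /* number of dimensions    */
H5Tget_array_dims(atid, adims_out, NULL);      /* dimension sizes; the
                                                  permutation is not used */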

-Example 5 -creates an array datatype and a dataset containing elements of the -array datatype in an HDF5 file. It then writes the dataset to the file. - - -

(Return to TOC) - - -

Creating compound datatypes

-

Properties of compound datatypes. A compound datatype is similar to a struct in C or a common block in Fortran. It is a collection of one or more atomic types or small arrays of such types. To create and use a compound datatype, you need to refer to various properties of the compound datatype:

    -
  • It is of class compound. -
  • It has a fixed total size, in bytes. -
  • It consists of zero or more members (defined in any order) with unique names and which occupy non-overlapping regions within the datum. -
  • Each member has its own datatype. -
  • Each member is referenced by an index number between zero and N-1, where N is the number of members in the compound datatype. -
  • Each member has a name which is unique among its siblings in a compound datatype. -
  • Each member has a fixed byte offset, which is the first byte (smallest byte address) of that member in a compound datatype. -
  • Each member can be a small array of up to four dimensions.
- -

Properties of members of a compound datatype are defined when the member is added to the compound type and cannot be subsequently modified. -

Defining compound datatypes. Compound datatypes must be built out of other datatypes. First, one creates an empty compound datatype and specifies its total size. Then members are added to the compound datatype in any order. -

Member names. Each member must have a descriptive name, which is the key used to uniquely identify the member within the compound datatype. A member name in an HDF5 datatype does not necessarily have to be the same as the name of the corresponding member in the C struct in memory, although this is often the case. Nor does one need to define all members of the C struct in the HDF5 compound datatype (or vice versa). -

Offsets. Usually a C struct will be defined to hold a data point in memory, and the offsets of the members in memory will be the offsets of the struct members from the beginning of an instance of the struct. The library defines the macro to compute the offset of a member within a struct: -
  HOFFSET(s,m)
-
This macro computes the offset of member m within a struct variable s. -

Here is an example in which a compound datatype is created to describe complex numbers whose type is defined by the complex_t struct. -

typedef struct {
   double re;   /*real part */
   double im;   /*imaginary part */
} complex_t;

complex_t tmp;  /*used only to compute offsets */
hid_t complex_id = H5Tcreate (H5T_COMPOUND, sizeof tmp);
H5Tinsert (complex_id, "real", HOFFSET(tmp,re),
           H5T_NATIVE_DOUBLE);
H5Tinsert (complex_id, "imaginary", HOFFSET(tmp,im),
           H5T_NATIVE_DOUBLE);

Example 6 shows how to create a compound datatype, write an array that has the compound datatype to the file, and read back subsets of the members. - - - -
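For instance, reading back only the real parts can be done by defining a memory datatype that contains just that member (a sketch; dataset is assumed to hold data written with complex_id, and N is a placeholder for the number of data points):

double real_out[N];      /* N: number of data points (placeholder)       */
hid_t  real_id;

/* A compound type holding only the "real" member; its offset is 0
 * because each memory element is a bare double.
 */
real_id = H5Tcreate(H5T_COMPOUND, sizeof(double));
H5Tinsert(real_id, "real", 0, H5T_NATIVE_DOUBLE);

status = H5Dread(dataset, real_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, real_out);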

(Return to TOC) - - -

Creating and writing extendible and chunked datasets

-

An extendible dataset is one whose dimensions can grow. In HDF5, it is possible to define a dataset to have certain initial dimensions, then later to increase the size of any of the initial dimensions. -

For example, you can create and store the following 3x3 HDF5 dataset: -

     1 1 1
-     1 1 1 
-     1 1 1 
-

then later to extend this into a 10x3 dataset by adding 7 rows, such as this: -

     1 1 1 
-     1 1 1 
-     1 1 1 
-     2 2 2
-     2 2 2
-     2 2 2
-     2 2 2
-     2 2 2
-     2 2 2
-     2 2 2
-

then further extend it to a 10x5 dataset by adding two columns, such as this: -

     1 1 1 3 3 
-     1 1 1 3 3 
-     1 1 1 3 3 
-     2 2 2 3 3
-     2 2 2 3 3
-     2 2 2 3 3
-     2 2 2 3 3
-     2 2 2 3 3
-     2 2 2 3 3
-     2 2 2 3 3
-

HDF5 requires you to use chunking in order to define extendible datasets. Chunking makes it possible to extend datasets efficiently, without having to reorganize storage excessively.

The following operations are required in order to write an extendible dataset: -

    - -
  1. Declare the dataspace of the dataset to have unlimited dimensions for all dimensions that might eventually be extended. -
  2. Set dataset creation properties to enable chunking and create a dataset. -
  3. Extend the size of the dataset.
- -

For example, suppose we wish to create a dataset similar to the one shown above. We want to start with a 3x3 dataset, then later extend it in both directions. -

Declaring unlimited dimensions. We could declare the dataspace to have unlimited dimensions with the following code, which uses the predefined constant H5S_UNLIMITED to specify unlimited dimensions. -

hsize_t dims[2] = {3, 3};            /* dataset dimensions at creation time */ 
hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
/*
 * Create the data space with unlimited dimensions. 
 */
dataspace = H5Screate_simple(RANK, dims, maxdims); 
-

Enabling chunking. We can then set the dataset storage layout properties to enable chunking. We do this using the routine H5Pset_chunk: -

hid_t cparms; 
hsize_t chunk_dims[2] = {2, 5};
/* 
 * Modify dataset creation properties to enable chunking.
 */
cparms = H5Pcreate (H5P_DATASET_CREATE);
status = H5Pset_chunk( cparms, RANK, chunk_dims);

Then create a dataset.

/*
 * Create a new dataset within the file using cparms
 * creation properties.
 */
dataset = H5Dcreate(file, DATASETNAME, H5T_NATIVE_INT, dataspace,
                    cparms);
- -

Extending dataset size. Finally, when we want to extend the size of the dataset, we invoke H5Dextend to extend the size of the dataset. In the following example, we extend the dataset along the first dimension, by seven rows, so that the new dimensions are <10,3>: -

/*
- * Extend the dataset. Dataset becomes 10 x 3.
- */
-dims[0] = dims[0] + 7;
-size[0] = dims[0]; 
-size[1] = dims[1]; 
-status = H5Dextend (dataset, size);
-

  -

Example 7 shows how to create a 3x3 extendible dataset, write the dataset, extend the dataset to 10x3, write the dataset again, extend it again to 10x5, write the dataset again. -

Example 8 shows how to read the data written by Example 7. - - -

(Return to TOC) - - -

Working with groups in a file

-

Groups provide a mechanism for organizing meaningful and extendible sets of datasets within an HDF5 file. The H5G API contains routines for working with groups. -

Creating a group. To create a group, use -H5Gcreate. For example, the following code -creates a group called Data in the root group. -

- /*
-  *  Create a group in the file.
-  */
- grp = H5Gcreate(file, "/Data", 0);
-
-A group may be created in another group by providing the -absolute name of the group to the H5Gcreate -function or by specifying its location. For example, -to create the group Data_new in the -Data group, one can use the following sequence -of calls: -
- /*
-  * Create group "Data_new" in the group "Data" by specifying
-  * absolute name of the group.
-  */
- grp_new = H5Gcreate(file, "/Data/Data_new", 0);
-
-or -
- /*
-  * Create group "Data_new" in the "Data" group.
-  */
- grp_new = H5Gcreate(grp, "Data_new", 0);
-
-Note that the group identifier grp is used -as the first parameter in the H5Gcreate function -when the relative name is provided. -

-The third parameter in H5Gcreate optionally -specifies how much file space to reserve to store the names -that will appear in this group. If a non-positive -value is supplied, then a default size is chosen. -

-H5Gclose closes the group and releases the -group identifier. -

- -Creating a dataset in a particular group. -As with groups, a dataset can be created in a particular -group by specifying its absolute name as illustrated in -the following example: - -

 
- /*
-  * Create the dataset "Compressed_Data" in the group using the 
-  * absolute name. The dataset creation property list is modified 
-  * to use GZIP compression with the compression effort set to 6.
-  * Note that compression can be used only when the dataset is 
-  * chunked.
-  */
- dims[0] = 1000;
- dims[1] = 20;
- cdims[0] = 20;
- cdims[1] = 20;
- dataspace = H5Screate_simple(RANK, dims, NULL);
- plist     = H5Pcreate(H5P_DATASET_CREATE);
-             H5Pset_chunk(plist, 2, cdims);
-             H5Pset_deflate( plist, 6);
- dataset = H5Dcreate(file, "/Data/Compressed_Data", H5T_NATIVE_INT,
-                     dataspace, plist);
-
-A relative dataset name may also be used when a dataset is -created. First obtain the identifier of the group in which -the dataset is to be created. Then create the dataset -with H5Dcreate as illustrated in the following -example: -
- /* 
-  * Open the group.
-  */
- grp = H5Gopen(file, "Data");
-
- /*
-  * Create the dataset "Compressed_Data" in the "Data" group
-  * by providing a group identifier and a relative dataset 
-  * name as parameters to the H5Dcreate function.
-  */
- dataset = H5Dcreate(grp, "Compressed_Data", H5T_NATIVE_INT,
-                     dataspace, plist);
-
-

- -Accessing an object in a group. -Any object in a group can be accessed by its absolute or -relative name. The following lines of code show how to use -the absolute name to access the dataset -Compressed_Data in the group Data -created in the examples above: -

-  /*
-   * Open the dataset "Compressed_Data" in the "Data" group. 
-   */
-  dataset = H5Dopen(file, "/Data/Compressed_Data");
-
-The same dataset can be accessed in another manner. First -access the group to which the dataset belongs, then open -the dataset. -
  /*
   * Open the group "Data" in the file.
   */
  grp  = H5Gopen(file, "Data");
- 
-  /*
-   * Access the "Compressed_Data" dataset in the group.
-   */
-  dataset = H5Dopen(grp, "Compressed_Data");
-
- -

-Example 9 -shows how to create a group in a file and a -dataset in a group. It uses the iterator function -H5Giterate to find the names of the objects -in the root group, and H5Glink and H5Gunlink -to create a new group name and delete the original name. - - -

(Return to TOC) - - -

Working with attributes

-

Think of an attribute as a small dataset that is attached to a normal dataset or group. The H5A API contains routines for working with attributes. Since attributes share many of the characteristics of datasets, the programming model for working with attributes is analogous in many ways to the model for working with datasets. The primary differences are that an attribute must be attached to a dataset or a group, and subsetting operations cannot be performed on attributes.

To create an attribute belonging to a particular dataset or group, first create a dataspace for the attribute with a call to H5Screate, then create the attribute using H5Acreate. For example, the following code creates an attribute called Integer_attribute that is a member of a dataset whose identifier is dataset. The attribute identifier is attr2. H5Awrite then sets the value of the attribute to that of the integer variable point. H5Aclose then releases the attribute identifier.

-int point = 1;                         /* Value of the scalar attribute */ 
-
-/*
- * Create scalar attribute.
- */
-aid2  = H5Screate(H5S_SCALAR);
-attr2 = H5Acreate(dataset, "Integer attribute", H5T_NATIVE_INT, aid2,
-                  H5P_DEFAULT);
-
-/*
- * Write scalar attribute.
- */
-ret = H5Awrite(attr2, H5T_NATIVE_INT, &point); 
-
-/*
- * Close attribute dataspace.
- */
-ret = H5Sclose(aid2); 
-
-/*
- * Close attribute.
- */
-ret = H5Aclose(attr2); 
-
-

  -

To read a scalar attribute whose name and datatype are known, first open the attribute using H5Aopen_name, then use H5Aread to get its value. For example the following reads a scalar attribute called Integer_attribute whose datatype is a native integer, and whose parent dataset has the identifier dataset. -

-/*
- * Attach to the scalar attribute using attribute name, then read and 
- * display its value.
- */
-attr = H5Aopen_name(dataset,"Integer attribute");
-ret  = H5Aread(attr, H5T_NATIVE_INT, &point_out);
-printf("The value of the attribute \"Integer attribute\" is %d \n", point_out); 
-ret =  H5Aclose(attr);
-
-

Reading an attribute whose characteristics are not known. It may be necessary to query a file to obtain information about an attribute, namely its name, datatype, rank, and dimensions. The following code opens an attribute by its index value using H5Aopen_idx, then reads in information about its datatype.

-/*
- * Attach to the string attribute using its index, then read and display the value.
- */
-attr =  H5Aopen_idx(dataset, 2);
-atype = H5Tcopy(H5T_C_S1);
-        H5Tset_size(atype, 4);
-ret   = H5Aread(attr, atype, string_out);
-printf("The value of the attribute with the index 2 is %s \n", string_out);
-
- -

In practice, if the characteristics of attributes are not known, -the code involved in accessing and processing the attribute can be quite -complex. For this reason, HDF5 includes a function called -H5Aiterate, which applies a user-supplied function to each -of a set of attributes. The user-supplied function can contain the code -that interprets, accesses and processes each attribute. -
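A minimal sketch of such an iteration (the callback body and names are illustrative):

#include <stdio.h>

/* Called once for each attribute attached to the dataset. */
herr_t attr_info(hid_t loc_id, const char *attr_name, void *op_data)
{
    printf("Found attribute: %s\n", attr_name);
    return 0;                          /* 0 means continue iterating */
}

/* ... in the application ... */
unsigned idx = 0;
ret = H5Aiterate(dataset, &idx, attr_info, NULL);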

-Example 10 illustrates the use of the H5Aiterate function, as well as the other attribute examples described above. - - -

(Return to TOC) - - -

Working with references to objects

- -In HDF5, objects (i.e. groups, datasets, and named datatypes) are usually -accessed by name. This access method was discussed in previous sections. -There is another way to access stored objects -- by reference. -

-An object reference is based on the relative file address of the object header -in the file and is constant for the life of the object. Once a reference to -an object is created and stored in a dataset in the file, it can be used -to dereference the object it points to. References are handy for creating -a file index or for grouping related objects by storing references to them in -one dataset. -

- -

Creating and storing references to objects

The following steps are involved in creating and storing file references to objects (a condensed code sketch follows the list):
    -
  1. Create the objects or open them if they already exist in the file. -
  2. Create a dataset to store the objects' references. -
  3. Create and store references to the objects in a buffer. -
  4. Write a buffer with the references to the dataset. -
- - -
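A condensed sketch of these steps (identifiers are illustrative; the referenced objects are assumed to already exist in the file identified by fid):

hobj_ref_t wbuf[2];                    /* buffer of object references     */

/* Steps 1-2: dataset that will hold the references (sid is its dataspace) */
ref_dset = H5Dcreate(fid, "Refs", H5T_STD_REF_OBJ, sid, H5P_DEFAULT);

/* Step 3: create references to two existing objects and store them in wbuf;
 *         the space identifier is -1 for object references */
status = H5Rcreate(&wbuf[0], fid, "/Group1/Dataset1", H5R_OBJECT, -1);
status = H5Rcreate(&wbuf[1], fid, "/Group1",          H5R_OBJECT, -1);

/* Step 4: write the references to the file */
status = H5Dwrite(ref_dset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL,
                  H5P_DEFAULT, wbuf);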
Programming example
-Example 11 -creates a group and two datasets and a named datatype in the group. -References to these four objects are stored in the dataset in the -root group. - -

-Notes: -Note the following elements of this example: -

    -
  • The following code, -
    -    dataset = H5Dcreate ( fid1,"Dataset3",H5T_STD_REF_OBJ,sid1,H5P_DEFAULT );
    -
    creates a dataset to store references. Notice that the H5T_STD_REF_OBJ datatype is used to specify that references to objects will be stored. The datatype H5T_STD_REF_DSETREG is used to store dataset region references and is discussed later.
  • The next few calls to the H5Rcreate function create - references to the objects and store them in the buffer wbuf. - The signature of the H5Rcreate function is: -
    -   herr_t H5Rcreate ( void* buf, hid_t loc_id, const char *name, 
    -                      H5R_type_t ref_type, hid_t space_id )    
    -
    -
      -
    • The first argument specifies the buffer to store the reference. -
    • The second and third arguments specify the name of the referenced - object. In the example, the file identifier fid1 and - absolute name of the dataset /Group1/Dataset1 - identify the dataset. One could also use the group identifier - of group Group1 and the relative name of the dataset - Dataset1 to create the same reference. -
    • The fourth argument specifies the type of the reference. - The example uses references to the objects (H5R_OBJECT). - Another type of reference, reference to the dataset region - (H5R_DATASET_REGION), is discussed later. -
    • The fifth argument specifies the space identifier. When references - to the objects are created, it should be set to -1. -
    -
  • The H5Dwrite function writes a dataset with the references to the file. Notice that the H5T_STD_REF_OBJ datatype is used to describe the dataset's memory datatype.
- -Output file contents: -The contents of the trefer1.h5 file created by this example -are as follows: -
-
-HDF5 "trefer1.h5" {
-GROUP "/" {
-   DATASET "Dataset3" {
-      DATATYPE { H5T_REFERENCE }
-      DATASPACE { SIMPLE ( 4 ) / ( 4 ) }
-      DATA {
-         DATASET 0:1696, DATASET 0:2152, GROUP 0:1320, DATATYPE 0:2268
-      }
-   }
-   GROUP "Group1" {
-      DATASET "Dataset1" {
-         DATATYPE { H5T_STD_U32LE }
-         DATASPACE { SIMPLE ( 4 ) / ( 4 ) }
-         DATA {
-            0, 3, 6, 9
-         }
-      }
-      DATASET "Dataset2" {
-         DATATYPE { H5T_STD_U8LE }
-         DATASPACE { SIMPLE ( 4 ) / ( 4 ) }
-         DATA {
-            0, 0, 0, 0
-         }
-      }
-      DATATYPE "Datatype1" {
-         H5T_STD_I32BE "a";
-         H5T_STD_I32BE "b";
-         H5T_IEEE_F32BE "c";
-      }
-   }
-}
-}
-
-
-Notice how the data in dataset Dataset3 is described. -The two numbers with the colon in between represent a unique identifier -of the object. These numbers are constant for the life of the object. - - -

Reading references and accessing objects using references

- -The following steps are involved: -
    -
  1. Open the dataset with the references and read them. - The H5T_STD_REF_OBJ datatype must be used to - describe the memory datatype. -
  2. Use the read reference to obtain the identifier of the object the - reference points to. -
  3. Open the dereferenced object and perform the desired operations. -
  4. Close all objects when the task is complete. -
- -
Programming example
- -Example 12 -opens and reads dataset Dataset3 from the file created created -in Example 11. Then the program dereferences the references -to dataset Dataset1, the group and the named datatype, -and opens those objects. -The program reads and displays the dataset's data, the group's comment, and -the number of members of the compound datatype. - -

-Output file contents: -The output of this program is as follows: - -

-
-Dataset data : 
- 0  3  6  9 
-
-Group comment is Foo! 
- 
-Number of compound datatype members is 3 
-
- - -

-Notes: -Note the following elements of this example: - -

    -
  • The H5Dread function was used to read dataset - Dataset3 containing the references to the objects. - The H5T_STD_REF_OBJ memory datatype was - used to read references to memory. -
  • H5Rdereference obtains the object's identifier. - The signature of this function is: -
    hid_t H5Rdereference(hid_t dataset, H5R_type_t ref_type, void *ref)
    -
    -
      -
    • The first argument is an identifier of the dataset with the - references. -
    • The second argument specifies the reference type. H5R_OBJECT was used to specify a reference to an object. Another type, used to specify a reference to a dataset region and discussed later, is H5R_DATASET_REGION.
    • The third argument is a buffer to store the reference to be read. -
    • The function returns an identifier of the object the reference - points to. In this simplified situation, the type that was - stored in the dataset is known. When the type of the object is - unknown, H5Rget_object_type should be used to - identify the type of object the reference points to. -
    -
- - -

(Return to TOC) - - -

Working with references to dataset regions

- -A dataset region reference points to the dataset selection by storing the -relative file address of the dataset header and the global heap offset of -the referenced selection. The selection referenced is located by retrieving -the coordinates of the areas in the selection from the global heap. This -internal mechanism of storing and retrieving dataset selections is transparent -to the user. A reference to the dataset selection (region) is constant for -the life of the dataset. - -

Creating and storing references to dataset regions

-The following steps are involved in creating and storing references to -the dataset regions: -
    - -
  1. Create a dataset to store the dataset regions (selections). -

    -

  2. Create selections in the dataset(s). Dataset(s) should already exist - in the file. -

    -

  3. Create references to the selections and store them in a buffer. -

    -

  4. Write references to the dataset regions in the file. -

    -

  5. Close all objects. -
- -
Programming example
- -Example 13 -creates a dataset in the file. Then it creates a dataset to store -references to the dataset regions (selections). -The first selection is a 6 x 6 hyperslab. -The second selection is a point selection in the same dataset. -References to both selections are created and stored in the buffer, -and then written to the dataset in the file. - -

-Notes: -Note the following elements of this example: -

    -
  • The code, -
    -    dset1=H5Dcreate(fid1,"Dataset1",H5T_STD_REF_DSETREG,sid1,H5P_DEFAULT);
    -
    - creates a dataset to store references to the dataset(s) regions (selections). - Notice that the H5T_STD_REF_DSETREG datatype is used. - -
  • This program uses hyperslab and point selections. The dataspace - handle sid2 is used for the calls to H5Sselect_hyperslab - and H5Sselect_elements. The handle was created when dataset - Dataset2 was created and it describes the dataset's - dataspace. It was not closed when the dataset was closed to decrease - the number of function calls used in the example. - In a real application program, one should open the dataset and determine - its dataspace using the H5Dget_space function. -
  • H5Rcreate is used to create a dataset region reference - and store it in a buffer. The signature of the function is: -
    -     herr_t H5Rcreate(void *buf, hid_t loc_id, const char *name,
    -                      H5R_type_t ref_type, hid_t space_id)
    -
    -
      -
    • The first argument specifies the buffer to store the reference. -
    • The second and third arguments specify the name of the referenced - dataset. In the example, the file identifier fid1 and the - absolute name of the dataset /Dataset2 were - used to identify the dataset. The reference to the region of this - dataset is stored in the buffer buf. - -
    • The fourth argument specifies the type of the reference. Since - the example creates references to the dataset regions, the - H5R_DATASET_REGION datatype is used. -
    • The fifth argument is a dataspace identifier of the referenced - dataset. -
    -
- -Output file contents: -The contents of the file trefer2.h5 created by this program -are as follows: - -
-HDF5 "trefer2.h5" {
-GROUP "/" {
-   DATASET "Dataset1" {
-      DATATYPE { H5T_REFERENCE }
-      DATASPACE { SIMPLE ( 4 ) / ( 4 ) }
-      DATA {
-         DATASET 0:744 {(2,2)-(7,7)}, DATASET 0:744 {(6,9), (2,2), (8,4), (1,6),
-          (2,8), (3,2), (0,4), (9,0), (7,1), (3,3)}, NULL, NULL
-      }
-   }
-   DATASET "Dataset2" {
-      DATATYPE { H5T_STD_U8LE }
-      DATASPACE { SIMPLE ( 10, 10 ) / ( 10, 10 ) }
-      DATA {
-         0, 3, 6, 9, 12, 15, 18, 21, 24, 27,
-         30, 33, 36, 39, 42, 45, 48, 51, 54, 57,
-         60, 63, 66, 69, 72, 75, 78, 81, 84, 87,
-         90, 93, 96, 99, 102, 105, 108, 111, 114, 117,
-         120, 123, 126, 129, 132, 135, 138, 141, 144, 147,
-         150, 153, 156, 159, 162, 165, 168, 171, 174, 177,
-         180, 183, 186, 189, 192, 195, 198, 201, 204, 207,
-         210, 213, 216, 219, 222, 225, 228, 231, 234, 237,
-         240, 243, 246, 249, 252, 255, 255, 255, 255, 255,
-         255, 255, 255, 255, 255, 255, 255, 255, 255, 255
-      }
-   }
-}
-}
-
Notice how raw data of the dataset with the dataset regions is displayed. Each element of the raw data consists of a reference to the dataset (DATASET number1:number2) and its selected region. If the selection is a hyperslab, the corner coordinates of the hyperslab are displayed. For the point selection, the coordinates of each point are displayed. Since only two selections were stored, the third and fourth elements of the dataset Dataset1 are set to NULL. This was done by the buffer initialization in the program.

Reading references to dataset regions

- -The following steps are involved in reading references to dataset -regions and referenced dataset regions (selections). -
    -
  1. Open and read the dataset containing references to the dataset regions. - The datatype H5T_STD_REF_DSETREG must be used during - read operation. -
  2. Use H5Rdereference to obtain the dataset identifier - from the read dataset region reference. -
                           OR
    -    
    - Use H5Rget_region to obtain the dataspace identifier for - the dataset containing the selection from the read dataset region reference. -
  3. With the dataspace identifier, the H5S interface functions, - H5Sget_select_*, can be used to obtain information - about the selection. -
  4. Close all objects when they are no longer needed. -
- -
Programming example
Example 14 reads a dataset containing dataset region references. It reads data from the dereferenced dataset and displays the number of elements and the raw data. Then it reads two selections: a hyperslab selection and a point selection. The program queries the number of points in the hyperslab and their coordinates and displays them. Then it queries the number of selected points and their coordinates and displays that information.

-Output: -The output of this program is : -

-
- Number of elements in the dataset is : 100
- 0  3  6  9  12  15  18  21  24  27 
- 30  33  36  39  42  45  48  51  54  57 
- 60  63  66  69  72  75  78  81  84  87 
- 90  93  96  99  102  105  108  111  114  117 
- 120  123  126  129  132  135  138  141  144  147 
- 150  153  156  159  162  165  168  171  174  177 
- 180  183  186  189  192  195  198  201  204  207 
- 210  213  216  219  222  225  228  231  234  237 
- 240  243  246  249  252  255  255  255  255  255 
- 255  255  255  255  255  255  255  255  255  255 
- Number of elements in the hyperslab is : 36 
- Hyperslab coordinates are : 
- ( 2 , 2 ) ( 7 , 7 ) 
- Number of selected elements is : 10
- Coordinates of selected elements are : 
- ( 6 , 9 ) 
- ( 2 , 2 ) 
- ( 8 , 4 ) 
- ( 1 , 6 ) 
- ( 2 , 8 ) 
- ( 3 , 2 ) 
- ( 0 , 4 ) 
- ( 9 , 0 ) 
- ( 7 , 1 ) 
- ( 3 , 3 ) 
- 
-
- -Notes: -Note the following elements of this example: -
    -
  • The dataset with the region references was read by H5Dread - with the H5T_STD_REF_DSETREG datatype specified. -
  • The read reference can be used to obtain the dataset identifier - with the following call: -
    -    dset2 = H5Rdereference (dset1,H5R_DATASET_REGION,&rbuf[0]);
    -
    or to obtain spatial information (dataspace and selection) with a call to H5Rget_region:
    -    sid2=H5Rget_region(dset1,H5R_DATASET_REGION,&rbuf[0]);
    -
    - The reference to the dataset region has information for both the dataset - itself and its selection. In both functions: -
      -
    • The first parameter is an identifier of the dataset with the - region references. -
    • The second parameter specifies the type of reference stored. - In this example, a reference to the dataset region is stored. -
    • The third parameter is a buffer containing the reference of the - specified type. -
    -
  • This example introduces several H5Sget_select* functions used to obtain information about selections (a short usage sketch follows this list):
      - H5Sget_select_npoints: returns the number of elements in - the hyperslab
      - H5Sget_select_hyper_nblocks: returns the number of blocks - in the hyperslab
      - H5Sget_select_blocklist: returns the "lower left" and - "upper right" coordinates of the blocks in the hyperslab selection
      - H5Sget_select_bounds: returns the coordinates of the - "minimal" block containing a hyperslab selection
      - H5Sget_select_elem_npoints: returns the number of points - in the element selection
      - H5Sget_select_elem_points: returns the coordinates of - the element selection -
    -
- - -
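A sketch of how a few of these functions might be used on the dataspace returned by H5Rget_region (variable names follow the example above):

hssize_t npoints;
hsize_t  start[2], end[2];

sid2    = H5Rget_region(dset1, H5R_DATASET_REGION, &rbuf[0]);
npoints = H5Sget_select_npoints(sid2);       /* elements in the selection */
H5Sget_select_bounds(sid2, start, end);      /* bounding-box coordinates  */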

(Return to TOC) -


- - -

4. Example Codes

-For the example codes, see -Introduction to HDF5 -- Example Codes. - - -

(Return to TOC) - - -


-
- - - -
-Introduction to HDF5 
-HDF5 User Guide  - -
-HDF5 Reference Manual 
-Other HDF5 documents and links  -
-
-
-
- - -
- -
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- -Last modified: 1 July 2004 - -
-Copyright   -
diff --git a/doc/html/H5.sample_code.html b/doc/html/H5.sample_code.html
deleted file mode 100644
index 3c46205..0000000
--- a/doc/html/H5.sample_code.html
+++ /dev/null
@@ -1,123 +0,0 @@

HDF5 Draft API Example Code
-

HDF5: API Example Code

-
- -

Example programs/sections of code below: -

-
#1 -
A simple example showing how to create a file. -
#2 -
An example showing how to create a homogeneous multi-dimensional dataset.
#3 -
An example showing how to read a generic dataset.
- -
-

Simple Example showing how to create a file.

- -

Notes:
This example creates a new HDF5 file with write access. Because the H5F_ACC_EXCL flag is used, the call will fail if the file already exists; to overwrite an existing file's information, the H5F_ACC_TRUNC flag would be used instead.

Code: - -

    hid_t file_id;

    file_id = H5Fcreate("example1.h5", H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);

    H5Fclose(file_id);
-
- -
-

Example showing how to create a homogeneous multi-dimensional dataset.

- -

Notes:
-This example creates a 4-dimensional dataset of 32-bit floating-point -numbers, corresponding to the current Scientific Dataset functionality. - -

Code: - -

hid_t file_id;              /* new file's ID */
hid_t dim_id;               /* new dimensionality's ID */
int rank=4;                 /* the number of dimensions */
hsize_t dims[4]={6,5,4,3};  /* the size of each dimension */
hid_t dataset_id;           /* new dataset's ID */
float buf[6][5][4][3];      /* storage for the dataset's data */
herr_t status;              /* function return status */

file_id = H5Fcreate ("example3.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
                     H5P_DEFAULT);
assert (file_id >= 0);

/* Create & initialize a dimensionality object */
dim_id = H5Screate_simple (rank, dims);
assert (dim_id >= 0);

/* Create & initialize the dataset object */
dataset_id = H5Dcreate (file_id, "Simple Object", H5T_NATIVE_FLOAT,
                        dim_id, H5P_DEFAULT);
assert (dataset_id >= 0);

<initialize data array>

/* Write the entire dataset out */
status = H5Dwrite (dataset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
                   H5P_DEFAULT, buf);
assert (status >= 0);

/* Release the IDs we've created */
H5Sclose (dim_id);
H5Dclose (dataset_id);
H5Fclose (file_id);
- -
-

Example showing how to read a generic dataset.

- -

Notes:
-This example shows how to obtain the information about a generic dataset and display its data. -

Code: - -

-hid_t file_id;          /* file's ID */
-hid_t dataset_id;       /* dataset's ID in memory */
-hid_t space_id;         /* dataspace's ID in memory */
-uintn nelems;           /* number of elements in array */
-double *buf;            /* pointer to the dataset's data */
-herr_t status;          /* function return value */
-
-file_id = H5Fopen ("example6.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
-assert (file_id >= 0);
-
-/* Attach to the dataset object */
-dataset_id = H5Dopen (file_id, "dataset1");
-assert (dataset_id >= 0);
-
-/* Get the OID for the dataspace */
-space_id = H5Dget_space (dataset_id);
-assert (space_id >= 0);
-
-/* Allocate space for the data */
-nelems = H5Sget_npoints (space_id);
-buf = malloc (nelems * sizeof(double));
-
-/* Read in the dataset */
-status = H5Dread (dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL,
-                  H5P_DEFAULT, buf);
-assert (status >= 0);
-
-/* Release the IDs we've accessed */
-H5Sclose (space_id);
-H5Dclose (dataset_id);
-H5Fclose (file_id);
-
diff --git a/doc/html/H5.user.PrintGen.html b/doc/html/H5.user.PrintGen.html deleted file mode 100644 index b73f093..0000000 --- a/doc/html/H5.user.PrintGen.html +++ /dev/null @@ -1,132 +0,0 @@ - - - - HDF5 User's Guide -- Single Print - - - - - - -
- - -

A User's Guide for HDF5

- -This page provides a means of printing the HDF5 User's Guide -with a single print command as follows: -
  1. Open this page in Internet Explorer, version 4.0 or later. - (Sorry, but as of this writing, Netscape does not offer the required print feature.) -
  2. On the File menu, select Print. -
  3. In the Print dialogue box, select - Print all linked documents. -
  4. Select OK. Depending on the print settings of your - browser, this will produce a 100- to 200-page set of documents, - one chapter or section at a time. -
  5. Once the print job is complete, discard the first page printed (this page). - The rest of the printout should form a complete copy of the - HDF5 User's Guide. -
- - -

-
- - - - - - - - - - - - - - - - - - - - - - - - - -
Title Page -     Title page. -
Copyright -     The HDF5 copyright notice, contact information, - and other back-of-the-title-page material. -
TOC -     Table of contents. -
HDF5 Files -     A guide to the H5F interface. -
Datasets - A guide to the H5D - interface. -
Datatypes - A guide to the H5T - interface. -
Dataspaces - A guide to the H5S - interface. -
Groups - A guide to the H5G - interface. -
References and - Identifiers - A guide to the H5R - and H5I interfaces. -
Attributes - A guide to the H5A - interface. -
Property Lists - A guide to the H5P - interface. -
Error Handling - A guide to the H5E - interface. -
Filters - A guide to the H5Z - interface. -
Caching - A guide for meta and raw data caching. -
Dataset Chunking - A guide to the issues and pitfalls - of dataset chunking. -
Mounting Files - A guide to mounting files containing - external HDF5 datasets. -
Debugging - A guide to debugging HDF5 API calls. -
Environment Variables - and Configuration Parameters - A list of HDF5 environment variables - and
   configuration parameters. -
DDL for HDF5 - A DDL in BNF for HDF5. -
-
- - -

- -
- - -
- -
-HDF Help Desk -
-Last modified: 22 July 1999 - -
- - - diff --git a/doc/html/H5.user.PrintTpg.html b/doc/html/H5.user.PrintTpg.html deleted file mode 100644 index c8b9536..0000000 --- a/doc/html/H5.user.PrintTpg.html +++ /dev/null @@ -1,79 +0,0 @@ - - - - HDF5 User's Guide - - - - - - -
-
-

- -
- - -HDF5 User's Guide - - -


-
-
-


- - -Release 1.2 -
-October 1999 -
- -



- - -Hierarchical Data Format (HDF) Group -
-National Center for Supercomputing Applications (NCSA) -
-University of Illinois at Urbana-Champaign (UIUC) -
- -
- -


-
-
-


-


- -
-A Note to the Reader: The primary HDF5 user documents are the online HTML documents distributed with the HDF5 code and binaries and found on the HDF5 website. These PDF and PostScript versions are generated from the HTML to provide the following capabilities: -
    -
  • To provide a version that can be reasonably printed in a - single print operation. -
  • To provide an easily searchable version. -
-In this package, you will find four PDF and PostScript documents: -
    -
  • Introduction to HDF5 -
  • A User's Guide for HDF5 -
  • HDF5 Reference Manual -
  • All three of the above documents concatenated into a single file -
-Note that these versions were created in response to user feedback; -the HDF Group is eager to hear from you so as to improve the delivered -product. -
- -

-
- - - - - diff --git a/doc/html/H5.user.html b/doc/html/H5.user.html deleted file mode 100644 index 6233d8f..0000000 --- a/doc/html/H5.user.html +++ /dev/null @@ -1,243 +0,0 @@ - - - - HDF5 User's Guide - - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

A User's Guide for HDF5
Release 1.4.5

- -

This document is the HDF5 User's Guide from - HDF5 Release 1.4.5. - Since a - new - HDF5 User's Guide is under development, - this version has not been updated for Release 1.6.0. -

The following documents form a loosely organized user's guide - to the HDF5 library. -

- -

- - - - - - - - - - - - - - - - - - - - -
HDF5 Files -     A guide to the H5F interface. -
Datasets - A guide to the H5D - interface. -
Datatypes - A guide to the H5T - interface. -
Dataspaces - A guide to the H5S - interface. -
Groups - A guide to the H5G - interface. -
References and - Identifiers - A guide to the H5R - and H5I interfaces. -
Attributes - A guide to the H5A - interface. -
Property Lists - A guide to the H5P - interface. -
Error Handling - A guide to the H5E - interface. -
Filters - A guide to the H5Z - interface. -
Caching - A guide for meta and raw data caching. -
Dataset Chunking - A guide to the issues and pitfalls - of dataset chunking. -
Mounting Files - A guide to mounting files containing - external HDF5 datasets. -
Performance - A guide to performance issues and - analysis tools. -
Debugging - A guide to debugging HDF5 API calls. -
Environment Variables - and -
   Configuration - Parameters -
A list of HDF5 environment variables - and configuration
   parameters. -
DDL for HDF5 - A DDL in BNF for HDF5. -
-
-
- - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
- - -
- -
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 3 July 2003 - - -
-Copyright   -
- - - diff --git a/doc/html/IH_map1.gif b/doc/html/IH_map1.gif deleted file mode 100644 index 7b0c95b..0000000 Binary files a/doc/html/IH_map1.gif and /dev/null differ diff --git a/doc/html/IH_map2.gif b/doc/html/IH_map2.gif deleted file mode 100644 index ea3db34..0000000 Binary files a/doc/html/IH_map2.gif and /dev/null differ diff --git a/doc/html/IH_map3.gif b/doc/html/IH_map3.gif deleted file mode 100644 index b045687..0000000 Binary files a/doc/html/IH_map3.gif and /dev/null differ diff --git a/doc/html/IH_map4.gif b/doc/html/IH_map4.gif deleted file mode 100644 index dd2f5e3..0000000 Binary files a/doc/html/IH_map4.gif and /dev/null differ diff --git a/doc/html/IH_mapFoot.gif b/doc/html/IH_mapFoot.gif deleted file mode 100644 index 01ce06f..0000000 Binary files a/doc/html/IH_mapFoot.gif and /dev/null differ diff --git a/doc/html/IH_mapHead.gif b/doc/html/IH_mapHead.gif deleted file mode 100644 index b655bd4..0000000 Binary files a/doc/html/IH_mapHead.gif and /dev/null differ diff --git a/doc/html/IOPipe.html b/doc/html/IOPipe.html deleted file mode 100644 index 7c24e2c..0000000 --- a/doc/html/IOPipe.html +++ /dev/null @@ -1,114 +0,0 @@ - - - - The Raw Data I/O Pipeline - - - -

The Raw Data I/O Pipeline

- -

The HDF5 raw data pipeline is a complicated beast that handles - all aspects of raw data storage and transfer of that data - between the file and the application. Data can be stored - contiguously (internal or external), in variable size external - segments, or regularly chunked; it can be sparse, extendible, - and/or compressible. Data transfers must be able to convert from - one data space to another, convert from one number type to - another, and perform partial I/O operations. Furthermore, - applications will expect their common usage of the pipeline to - perform well. - -

To accomplish these goals, the pipeline has been designed in a - modular way so no single subroutine is overly complicated and so - functionality can be inserted easily at the appropriate - locations in the pipeline. A general pipeline was developed and - then certain paths through the pipeline were optimized for - performance. - -

We describe only the file-to-memory side of the pipeline since - the memory-to-file side is a mirror image. We also assume that a - proper hyperslab of a simple data space is being read from the - file into a proper hyperslab of a simple data space in memory, - and that the data type is a compound type which may require - various number conversions on its members. - - Figure 1 - -

The diagrams should be read from the top down. The Line A - in the figure above shows that H5Dread() copies - data from a hyperslab of a file dataset to a hyperslab of an - application buffer by calling H5D_read(). And - H5D_read() calls, in a loop, - H5S_simp_fgath(), H5T_conv_struct(), - and H5S_simp_mscat(). A temporary buffer, TCONV, is - loaded with data points from the file, then data type conversion - is performed on the temporary buffer, and finally data points - are scattered out to application memory. Thus, data type - conversion is an in-place operation and data space conversion - consists of two steps. An additional temporary buffer, BKG, is - large enough to hold N instances of the destination - data type where N is the same number of data points - that can be held by the TCONV buffer (which is large enough to - hold either source or destination data points). - -

The application sets an upper limit for the size of the TCONV - buffer and optionally supplies a buffer. If no buffer is - supplied then one will be created by calling - malloc() when the pipeline is executed (when - necessary) and freed when the pipeline exits. The size of the - BKG buffer depends on the size of the TCONV buffer and if the - application supplies a BKG buffer it should be at least as large - as the TCONV buffer. The default size for these buffers is one - megabyte but the buffer might not be used to full capacity if - the buffer size is not an integer multiple of the source or - destination data point size (whichever is larger, but only - destination for the BKG buffer). - - - -
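Although the preceding paragraph describes internal machinery, the TCONV and BKG limits it mentions are the ones an application can adjust through the dataset transfer property list. The fragment below is only a rough sketch of doing so; it is not part of this note, the 4 MB size is an arbitrary choice, and the dataset, datatype, and dataspace identifiers are assumed to exist already.

     /* Sketch: raise the type-conversion (TCONV) and background (BKG) buffer
      * limit from the 1 MB default to 4 MB and let the library allocate and
      * free both buffers itself (NULL pointers). */
     hid_t xfer = H5Pcreate (H5P_DATASET_XFER);
     H5Pset_buffer (xfer, 4*1024*1024, NULL, NULL);

     status = H5Dread (dataset, memtype, memspace, filespace, xfer, buf);

     H5Pclose (xfer);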

Occasionally the destination data points will be partially initialized and the H5Dread() operation should not clobber those values. For instance, the destination type might be a struct with members a and b where a is already initialized and we're reading b from the file. An extra line, G, is added to the pipeline to provide the type conversion functions with the existing data. - Figure 2 -
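One user-visible control related to this behavior is the preserve flag on the dataset transfer property list (H5Pset_preserve, later deprecated). The fragment below is a hedged sketch only, not part of this note; the identifiers are assumed to exist already.

     /* Sketch: ask the library not to clobber already-initialized members of
      * the destination compound type during a partial read (this is what
      * brings the BKG buffer, line G, into the pipeline). */
     hid_t xfer = H5Pcreate (H5P_DATASET_XFER);
     H5Pset_preserve (xfer, 1);

     status = H5Dread (dataset, partial_memtype, memspace, filespace, xfer, buf);

     H5Pclose (xfer);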

It will most likely be quite common that no data type - conversion is necessary. In such cases a temporary buffer for - data type conversion is not needed and data space conversion - can happen in a single step. In fact, when the source and - destination data are both contiguous (they aren't in the - picture) the loop degenerates to a single iteration. - - - Figure 3 - -

So far we've looked only at internal contiguous storage, but by - replacing Line B in Figures 1 and 2 and Line A in Figure 3 with - Figure 4 the pipeline is able to handle regularly chunked - objects. Line B of Figure 4 is executed once for each chunk - which contains data to be read and the chunk address is found by - looking at a multi-dimensional key in a chunk B-tree which has - one entry per chunk. - - Figure 4 - -

If a single chunk is requested and the destination buffer is - the same size/shape as the chunk, then the CHUNK buffer is - bypassed and the destination buffer is used instead as shown in - Figure 5. - - Figure 5 - -


-
Robb Matzke
- - -Last modified: Wed Mar 18 10:38:30 EST 1998 - - - diff --git a/doc/html/Intro/IntroExamples.html b/doc/html/Intro/IntroExamples.html deleted file mode 100644 index 6511683..0000000 --- a/doc/html/Intro/IntroExamples.html +++ /dev/null @@ -1,2128 +0,0 @@ - - - - -Introduction to HDF5 - - - - - - - - - -
-
- - - -
-Introduction to HDF5 
-HDF5 User Guide  - -
-HDF5 Reference Manual 
-Other HDF5 documents and links  -
-
-
-

Introduction to HDF5 -- Example Codes

- - - -
-
- - - -
Table of Contents
- - -          - 1: Creating and writing a - dataset
-          - 2. Reading a hyperslab
-          - 3. Writing selected data
-          - 4. Working with compound datatypes
-          - 5. Creating and writing an extendible
-          -          - dataset
-          - 6. Reading data
-          - 7. Creating groups
-
- -
   - - -          - 8. Writing and reading - attributes
-          - 9. Creating and writing references
-          -          - to objects
-          - 10. Reading references to objects
-          - 11. Creating and writing references
-          -          - to dataset regions
-          - 12. Reading references to dataset
-          -          - regions -
-
-
-

- -


- - - -

Example 1: How to create a homogeneous multi-dimensional dataset and write it to a file.

-

This example creates a 2-dimensional HDF5 dataset of little-endian 32-bit integers. -

-
-/*  
- *  This example writes data to the HDF5 file.
- *  Data conversion is performed during write operation.  
- */
- 
-#include <hdf5.h>
-
-#define FILE        "SDS.h5"
-#define DATASETNAME "IntArray" 
-#define NX     5                      /* dataset dimensions */
-#define NY     6
-#define RANK   2
-
-int
-main (void)
-{
-    hid_t       file, dataset;         /* file and dataset handles */
-    hid_t       datatype, dataspace;   /* handles */
-    hsize_t     dimsf[2];              /* dataset dimensions */
-    herr_t      status;                             
-    int         data[NX][NY];          /* data to write */
-    int         i, j;
-
-    /* 
-     * Data  and output buffer initialization. 
-     */
-    for (j = 0; j < NX; j++) {
-	for (i = 0; i < NY; i++)
-	    data[j][i] = i + j;
-    }     
-    /*
-     * 0 1 2 3 4 5 
-     * 1 2 3 4 5 6
-     * 2 3 4 5 6 7
-     * 3 4 5 6 7 8
-     * 4 5 6 7 8 9
-     */
-
-    /*
-     * Create a new file using H5F_ACC_TRUNC access,
-     * default file creation properties, and default file
-     * access properties.
-     */
-    file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /*
-     * Describe the size of the array and create the data space for fixed
-     * size dataset. 
-     */
-    dimsf[0] = NX;
-    dimsf[1] = NY;
-    dataspace = H5Screate_simple(RANK, dimsf, NULL); 
-
-    /* 
-     * Define datatype for the data in the file.
-     * We will store little endian INT numbers.
-     */
-    datatype = H5Tcopy(H5T_NATIVE_INT);
-    status = H5Tset_order(datatype, H5T_ORDER_LE);
-
-    /*
-     * Create a new dataset within the file using defined dataspace and
-     * datatype and default dataset creation properties.
-     */
-    dataset = H5Dcreate(file, DATASETNAME, datatype, dataspace,
-			H5P_DEFAULT);
-
-    /*
-     * Write the data to the dataset using default transfer properties.
-     */
-    status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
-		      H5P_DEFAULT, data);
-
-    /*
-     * Close/release resources.
-     */
-    H5Sclose(dataspace);
-    H5Tclose(datatype);
-    H5Dclose(dataset);
-    H5Fclose(file);
- 
-    return 0;
-}     
-
- - - - -

  -

(Return to TOC) - - -  -

Example 2. How to read a hyperslab from file into memory.

-

This example reads a hyperslab from a 2-d HDF5 dataset into a 3-d dataset in memory. -

-
-/*  
- *   This example reads hyperslab from the SDS.h5 file 
- *   created by h5_write.c program into two-dimensional
- *   plane of the three-dimensional array. 
- *   Information about dataset in the SDS.h5 file is obtained. 
- */
- 
-#include "hdf5.h"
-
-#define FILE        "SDS.h5"
-#define DATASETNAME "IntArray" 
-#define NX_SUB  3           /* hyperslab dimensions */ 
-#define NY_SUB  4 
-#define NX 7           /* output buffer dimensions */ 
-#define NY 7 
-#define NZ  3 
-#define RANK         2
-#define RANK_OUT     3
-
-int
-main (void)
-{
-    hid_t       file, dataset;         /* handles */
-    hid_t       datatype, dataspace;   
-    hid_t       memspace; 
-    H5T_class_t class;                 /* datatype class */
-    H5T_order_t order;                 /* data order */
-    size_t      size;                  /*
-				        * size of the data element	       
-				        * stored in file
-				        */
-    hsize_t     dimsm[3];              /* memory space dimensions */
-    hsize_t     dims_out[2];           /* dataset dimensions */      
-    herr_t      status;                             
-
-    int         data_out[NX][NY][NZ ]; /* output buffer */
-   
-    hsize_t      count[2];              /* size of the hyperslab in the file */
-    hsize_t      offset[2];             /* hyperslab offset in the file */
-    hsize_t      count_out[3];          /* size of the hyperslab in memory */
-    hsize_t      offset_out[3];         /* hyperslab offset in memory */
-    int          i, j, k, status_n, rank;
-
-    for (j = 0; j < NX; j++) {
-	for (i = 0; i < NY; i++) {
-	    for (k = 0; k < NZ ; k++)
-		data_out[j][i][k] = 0;
-	}
-    } 
- 
-    /*
-     * Open the file and the dataset.
-     */
-    file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
-    dataset = H5Dopen(file, DATASETNAME);
-
-    /*
-     * Get datatype and dataspace handles and then query
-     * dataset class, order, size, rank and dimensions.
-     */
-    datatype  = H5Dget_type(dataset);     /* datatype handle */ 
-    class     = H5Tget_class(datatype);
-    if (class == H5T_INTEGER) printf("Data set has INTEGER type \n");
-    order     = H5Tget_order(datatype);
-    if (order == H5T_ORDER_LE) printf("Little endian order \n");
-
-    size  = H5Tget_size(datatype);
-    printf(" Data size is %d \n", size);
-
-    dataspace = H5Dget_space(dataset);    /* dataspace handle */
-    rank      = H5Sget_simple_extent_ndims(dataspace);
-    status_n  = H5Sget_simple_extent_dims(dataspace, dims_out, NULL);
-    printf("rank %d, dimensions %lu x %lu \n", rank,
-	   (unsigned long)(dims_out[0]), (unsigned long)(dims_out[1]));
-
-    /* 
-     * Define hyperslab in the dataset. 
-     */
-    offset[0] = 1;
-    offset[1] = 2;
-    count[0]  = NX_SUB;
-    count[1]  = NY_SUB;
-    status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, 
-				 count, NULL);
-
-    /*
-     * Define the memory dataspace.
-     */
-    dimsm[0] = NX;
-    dimsm[1] = NY;
-    dimsm[2] = NZ ;
-    memspace = H5Screate_simple(RANK_OUT,dimsm,NULL);   
-
-    /* 
-     * Define memory hyperslab. 
-     */
-    offset_out[0] = 3;
-    offset_out[1] = 0;
-    offset_out[2] = 0;
-    count_out[0]  = NX_SUB;
-    count_out[1]  = NY_SUB;
-    count_out[2]  = 1;
-    status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, 
-				 count_out, NULL);
-
-    /*
-     * Read data from hyperslab in the file into the hyperslab in 
-     * memory and display.
-     */
-    status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace,
-		     H5P_DEFAULT, data_out);
-    for (j = 0; j < NX; j++) {
-	for (i = 0; i < NY; i++) printf("%d ", data_out[j][i][0]);
-	printf("\n");
-    }
-    /*
-     * 0 0 0 0 0 0 0
-     * 0 0 0 0 0 0 0
-     * 0 0 0 0 0 0 0
-     * 3 4 5 6 0 0 0  
-     * 4 5 6 7 0 0 0
-     * 5 6 7 8 0 0 0
-     * 0 0 0 0 0 0 0
-     */
-
-    /*
-     * Close/release resources.
-     */
-    H5Tclose(datatype);
-    H5Dclose(dataset);
-    H5Sclose(dataspace);
-    H5Sclose(memspace);
-    H5Fclose(file);
-
-    return 0;
-}     
-
- - - - - -

  -

(Return to TOC) - - -

Example 3. Writing selected data from memory to a file.

-

This example shows how to use the selection capabilities of HDF5 to write selected data to a file. It includes the examples discussed in the text. - -

-
-/* 
- *  This program shows how the H5Sselect_hyperslab and H5Sselect_elements
- *  functions are used to write selected data from memory to the file.
- *  Program takes 48 elements from the linear buffer and writes them into
- *  the matrix using 3x2 blocks, (4,3) stride and (2,4) count. 
- *  Then four elements  of the matrix are overwritten with the new values and 
- *  file is closed. Program reopens the file and reads and displays the result.
- */ 
- 
-#include <hdf5.h>
-
-#define FILE "Select.h5"
-
-#define MSPACE1_RANK     1          /* Rank of the first dataset in memory */
-#define MSPACE1_DIM      50         /* Dataset size in memory */ 
-
-#define MSPACE2_RANK     1          /* Rank of the second dataset in memory */ 
-#define MSPACE2_DIM      4          /* Dataset size in memory */ 
-
-#define FSPACE_RANK      2          /* Dataset rank as it is stored in the file */
-#define FSPACE_DIM1      8          /* Dimension sizes of the dataset as it is
-                                       stored in the file */
-#define FSPACE_DIM2      12 
-
-                                    /* We will read dataset back from the file
-                                       to the dataset in memory with these
-                                       dataspace parameters. */  
-#define MSPACE_RANK      2
-#define MSPACE_DIM1      8 
-#define MSPACE_DIM2      12 
-
-#define NPOINTS          4          /* Number of points that will be selected 
-                                       and overwritten */ 
-int main (void)
-{
-
-   hid_t   file, dataset;           /* File and dataset identifiers */
-   hid_t   mid1, mid2, fid;         /* Dataspace identifiers */
-   hsize_t dim1[] = {MSPACE1_DIM};  /* Dimension size of the first dataset 
-                                       (in memory) */ 
-   hsize_t dim2[] = {MSPACE2_DIM};  /* Dimension size of the second dataset
-                                       (in memory) */ 
-   hsize_t fdim[] = {FSPACE_DIM1, FSPACE_DIM2}; 
-                                    /* Dimension sizes of the dataset (on disk) */
-
-   hsize_t start[2];  /* Start of hyperslab */
-   hsize_t stride[2]; /* Stride of hyperslab */
-   hsize_t count[2];  /* Block count */
-   hsize_t block[2];  /* Block sizes */
-
-   hsize_t coord[NPOINTS][FSPACE_RANK]; /* Array to store selected points 
-                                            from the file dataspace */ 
-   herr_t  ret;
-   uint    i,j;
-   int     matrix[MSPACE_DIM1][MSPACE_DIM2];
-   int     vector[MSPACE1_DIM];
-   int     values[] = {53, 59, 61, 67};  /* New values to be written */
-
-   /*
-    * Buffers' initialization.
-    */
-   vector[0] = vector[MSPACE1_DIM - 1] = -1;
-   for (i = 1; i < MSPACE1_DIM - 1; i++) vector[i] = i;
-
-   for (i = 0; i < MSPACE_DIM1; i++) {
-       for (j = 0; j < MSPACE_DIM2; j++)
-       matrix[i][j] = 0;
-    }
-
-    /*
-     * Create a file.
-     */
-    file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /* 
-     * Create dataspace for the dataset in the file.
-     */
-    fid = H5Screate_simple(FSPACE_RANK, fdim, NULL);
-
-    /*
-     * Create dataset and write it into the file.
-     */
-    dataset = H5Dcreate(file, "Matrix in file", H5T_NATIVE_INT, fid, H5P_DEFAULT);
-    ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, matrix);
-
-    /*
-     * Select hyperslab for the dataset in the file, using 3x2 blocks, 
-     * (4,3) stride and (2,4) count starting at the position (0,1).
-     */
-    start[0]  = 0; start[1]  = 1;
-    stride[0] = 4; stride[1] = 3;
-    count[0]  = 2; count[1]  = 4;    
-    block[0]  = 3; block[1]  = 2;
-    ret = H5Sselect_hyperslab(fid, H5S_SELECT_SET, start, stride, count, block);
-
-    /*
-     * Create dataspace for the first dataset.
-     */
-    mid1 = H5Screate_simple(MSPACE1_RANK, dim1, NULL);
-
-    /*
-     * Select hyperslab. 
-     * We will use 48 elements of the vector buffer starting at the second element.
-     * Selected elements are 1 2 3 . . . 48
-     */
-    start[0]  = 1;
-    stride[0] = 1;
-    count[0]  = 48;
-    block[0]  = 1;
-    ret = H5Sselect_hyperslab(mid1, H5S_SELECT_SET, start, stride, count, block);
- 
-    /*
-     * Write selection from the vector buffer to the dataset in the file.
-     *
-     * File dataset should look like this:       
-     *                    0  1  2  0  3  4  0  5  6  0  7  8 
-     *                    0  9 10  0 11 12  0 13 14  0 15 16
-     *                    0 17 18  0 19 20  0 21 22  0 23 24
-     *                    0  0  0  0  0  0  0  0  0  0  0  0
-     *                    0 25 26  0 27 28  0 29 30  0 31 32
-     *                    0 33 34  0 35 36  0 37 38  0 39 40
-     *                    0 41 42  0 43 44  0 45 46  0 47 48
-     *                    0  0  0  0  0  0  0  0  0  0  0  0
-     */
-     ret = H5Dwrite(dataset, H5T_NATIVE_INT, mid1, fid, H5P_DEFAULT, vector);
-
-    /*
-     * Reset the selection for the file dataspace fid.
-     */
-    ret = H5Sselect_none(fid);
-
-    /*
-     * Create dataspace for the second dataset.
-     */
-    mid2 = H5Screate_simple(MSPACE2_RANK, dim2, NULL);
-
-    /*
-     * Select sequence of NPOINTS points in the file dataspace.
-     */
-    coord[0][0] = 0; coord[0][1] = 0;
-    coord[1][0] = 3; coord[1][1] = 3;
-    coord[2][0] = 3; coord[2][1] = 5;
-    coord[3][0] = 5; coord[3][1] = 6;
-
-    ret = H5Sselect_elements(fid, H5S_SELECT_SET, NPOINTS, 
-                             (const hsize_t **)coord);
-
-    /*
-     * Write new selection of points to the dataset.
-     */
-    ret = H5Dwrite(dataset, H5T_NATIVE_INT, mid2, fid, H5P_DEFAULT, values);   
-
-    /*
-     * File dataset should look like this:     
-     *                   53  1  2  0  3  4  0  5  6  0  7  8 
-     *                    0  9 10  0 11 12  0 13 14  0 15 16
-     *                    0 17 18  0 19 20  0 21 22  0 23 24
-     *                    0  0  0 59  0 61  0  0  0  0  0  0
-     *                    0 25 26  0 27 28  0 29 30  0 31 32
-     *                    0 33 34  0 35 36 67 37 38  0 39 40
-     *                    0 41 42  0 43 44  0 45 46  0 47 48
-     *                    0  0  0  0  0  0  0  0  0  0  0  0
-     *                                        
-     */
-   
-    /*
-     * Close memory file and memory dataspaces.
-     */
-    ret = H5Sclose(mid1); 
-    ret = H5Sclose(mid2); 
-    ret = H5Sclose(fid); 
- 
-    /*
-     * Close dataset.
-     */
-    ret = H5Dclose(dataset);
-
-    /*
-     * Close the file.
-     */
-    ret = H5Fclose(file);
-
-    /*
-     * Open the file.
-     */
-    file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
-
-    /*
-     * Open the dataset.
-     */
-    dataset = H5Dopen(file,"Matrix in file");
-
-    /*
-     * Read data back to the buffer matrix.
-     */
-    ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
-                  H5P_DEFAULT, matrix);
-
-    /*
-     * Display the result.
-     */
-    for (i=0; i < MSPACE_DIM1; i++) {
-        for(j=0; j < MSPACE_DIM2; j++) printf("%3d  ", matrix[i][j]);
-        printf("\n");
-    }
-
-    return 0;
-}
-
- - - - -

  -

(Return to TOC) - - -

Example 4. Working with compound datatypes.

-

This example shows how to create a compound datatype, write an array which has the compound datatype to the file, and read back subsets of fields. -

-
-/*
- * This example shows how to create a compound datatype,
- * write an array which has the compound datatype to the file,
- * and read back fields' subsets.
- */
-
-#include "hdf5.h"
-
-#define FILE          "SDScompound.h5"
-#define DATASETNAME   "ArrayOfStructures"
-#define LENGTH        10
-#define RANK          1
-
-int
-main(void)
-{
-
-    /* First structure  and dataset*/
-    typedef struct s1_t {
-	int    a;
-	float  b;
-	double c; 
-    } s1_t;
-    s1_t       s1[LENGTH];
-    hid_t      s1_tid;     /* File datatype identifier */
-
-    /* Second structure (subset of s1_t)  and dataset*/
-    typedef struct s2_t {
-	double c;
-	int    a;
-    } s2_t;
-    s2_t       s2[LENGTH];
-    hid_t      s2_tid;    /* Memory datatype handle */
-
-    /* Third "structure" ( will be used to read float field of s1) */
-    hid_t      s3_tid;   /* Memory datatype handle */
-    float      s3[LENGTH];
-
-    int        i;
-    hid_t      file, dataset, space; /* Handles */
-    herr_t     status;
-    hsize_t    dim[] = {LENGTH};   /* Dataspace dimensions */
-
-
-    /*
-     * Initialize the data
-     */
-    for (i = 0; i< LENGTH; i++) {
-        s1[i].a = i;
-        s1[i].b = i*i;
-        s1[i].c = 1./(i+1);
-    }
-
-    /*
-     * Create the data space.
-     */
-    space = H5Screate_simple(RANK, dim, NULL);
-
-    /*
-     * Create the file.
-     */
-    file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /*
-     * Create the memory datatype. 
-     */
-    s1_tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t));
-    H5Tinsert(s1_tid, "a_name", HOFFSET(s1_t, a), H5T_NATIVE_INT);
-    H5Tinsert(s1_tid, "c_name", HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE);
-    H5Tinsert(s1_tid, "b_name", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT);
-
-    /* 
-     * Create the dataset.
-     */
-    dataset = H5Dcreate(file, DATASETNAME, s1_tid, space, H5P_DEFAULT);
-
-    /*
-     * Write data to the dataset.
-     */
-    status = H5Dwrite(dataset, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, s1);
-
-    /*
-     * Release resources
-     */
-    H5Tclose(s1_tid);
-    H5Sclose(space);
-    H5Dclose(dataset);
-    H5Fclose(file);
- 
-    /*
-     * Open the file and the dataset.
-     */
-    file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
- 
-    dataset = H5Dopen(file, DATASETNAME);
-
-    /* 
-     * Create a datatype for s2
-     */
-    s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t));
-
-    H5Tinsert(s2_tid, "c_name", HOFFSET(s2_t, c), H5T_NATIVE_DOUBLE);
-    H5Tinsert(s2_tid, "a_name", HOFFSET(s2_t, a), H5T_NATIVE_INT);
-
-    /*
-     * Read two fields c and a from s1 dataset. Fields in the file
-     * are found by their names "c_name" and "a_name".
-     */
-    status = H5Dread(dataset, s2_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, s2);
-
-    /*
-     * Display the fields
-     */
-    printf("\n");
-    printf("Field c : \n");
-    for( i = 0; i < LENGTH; i++) printf("%.4f ", s2[i].c);
-    printf("\n");
-
-    printf("\n");
-    printf("Field a : \n");
-    for( i = 0; i < LENGTH; i++) printf("%d ", s2[i].a);
-    printf("\n");
-
-    /* 
-     * Create a datatype for s3.
-     */
-    s3_tid = H5Tcreate(H5T_COMPOUND, sizeof(float));
-
-    status = H5Tinsert(s3_tid, "b_name", 0, H5T_NATIVE_FLOAT);
-
-    /*
-     * Read field b from s1 dataset. Field in the file is found by its name.
-     */
-    status = H5Dread(dataset, s3_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, s3);
-
-    /*
-     * Display the field
-     */
-    printf("\n");
-    printf("Field b : \n");
-    for( i = 0; i < LENGTH; i++) printf("%.4f ", s3[i]);
-    printf("\n");
-
-    /*
-     * Release resources
-     */
-    H5Tclose(s2_tid);
-    H5Tclose(s3_tid);
-    H5Dclose(dataset);
-    H5Fclose(file);
-
-    return 0;
-}
-
- - - - -

  -

(Return to TOC) - - -

Example 5. Creating and writing an extendible dataset.

-

This example shows how to create a 3x3 extendible dataset, to extend the dataset to 10x3, then to extend it again to 10x5. -

-
-/*  
- *   This example shows how to work with extendible dataset.
- *   In the current version of the library dataset MUST be
- *   chunked.
- *   
- */
- 
-#include "hdf5.h"
-
-#define FILE        "SDSextendible.h5"
-#define DATASETNAME "ExtendibleArray" 
-#define RANK         2
-#define NX     10
-#define NY     5 
-
-int
-main (void)
-{
-    hid_t       file;                          /* handles */
-    hid_t       dataspace, dataset;  
-    hid_t       filespace;                   
-    hid_t       cparms;                     
-    hsize_t      dims[2]  = { 3, 3};            /*
-						 * dataset dimensions				
-						 * at the creation time
-						 */
-    hsize_t      dims1[2] = { 3, 3};            /* data1 dimensions */ 
-    hsize_t      dims2[2] = { 7, 1};            /* data2 dimensions */  
-    hsize_t      dims3[2] = { 2, 2};            /* data3 dimensions */ 
-
-    hsize_t      maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
-    hsize_t      chunk_dims[2] ={2, 5};
-    hsize_t      size[2];
-    hsize_t      offset[2];
-
-    herr_t      status;                             
-
-    int         data1[3][3] = { {1, 1, 1},       /* data to write */
-				{1, 1, 1},
-				{1, 1, 1} };      
-
-    int         data2[7]    = { 2, 2, 2, 2, 2, 2, 2};
-
-    int         data3[2][2] = { {3, 3},
-				{3, 3} };
-
-    /*
-     * Create the data space with unlimited dimensions. 
-     */
-    dataspace = H5Screate_simple(RANK, dims, maxdims); 
-
-    /*
-     * Create a new file. If file exists its contents will be overwritten.
-     */
-    file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /* 
-     * Modify dataset creation properties, i.e. enable chunking.
-     */
-    cparms = H5Pcreate (H5P_DATASET_CREATE);
-    status = H5Pset_chunk( cparms, RANK, chunk_dims);
-
-    /*
-     * Create a new dataset within the file using cparms
-     * creation properties.
-     */
-    dataset = H5Dcreate(file, DATASETNAME, H5T_NATIVE_INT, dataspace,
-			cparms);
-
-    /*
-     * Extend the dataset. This call assures that dataset is at least 3 x 3.
-     */
-    size[0]   = 3; 
-    size[1]   = 3; 
-    status = H5Dextend (dataset, size);
-
-    /*
-     * Select a hyperslab.
-     */
-    filespace = H5Dget_space (dataset);
-    offset[0] = 0;
-    offset[1] = 0;
-    status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL,
-				 dims1, NULL);  
-
-    /*
-     * Write the data to the hyperslab.
-     */
-    status = H5Dwrite(dataset, H5T_NATIVE_INT, dataspace, filespace,
-		      H5P_DEFAULT, data1);
-
-    /*
-     * Extend the dataset. Dataset becomes 10 x 3.
-     */
-    dims[0]   = dims1[0] + dims2[0];
-    size[0]   = dims[0];  
-    size[1]   = dims[1]; 
-    status = H5Dextend (dataset, size);
-
-    /*
-     * Select a hyperslab.
-     */
-    filespace = H5Dget_space (dataset);
-    offset[0] = 3;
-    offset[1] = 0;
-    status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL,
-				 dims2, NULL);  
-
-    /*
-     * Define memory space
-     */
-    dataspace = H5Screate_simple(RANK, dims2, NULL); 
-
-    /*
-     * Write the data to the hyperslab.
-     */
-    status = H5Dwrite(dataset, H5T_NATIVE_INT, dataspace, filespace,
-		      H5P_DEFAULT, data2);
-
-    /*
-     * Extend the dataset. Dataset becomes 10 x 5.
-     */
-    dims[1]   = dims1[1] + dims3[1];
-    size[0]   = dims[0];  
-    size[1]   = dims[1]; 
-    status = H5Dextend (dataset, size);
-
-    /*
-     * Select a hyperslab
-     */
-    filespace = H5Dget_space (dataset);
-    offset[0] = 0;
-    offset[1] = 3;
-    status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, 
-				 dims3, NULL);  
-
-    /*
-     * Define memory space.
-     */
-    dataspace = H5Screate_simple(RANK, dims3, NULL); 
-
-    /*
-     * Write the data to the hyperslab.
-     */
-    status = H5Dwrite(dataset, H5T_NATIVE_INT, dataspace, filespace,
-		      H5P_DEFAULT, data3);
-
-    /*
-     * Resulting dataset
-     * 
-     *	 1 1 1 3 3
-     *	 1 1 1 3 3
-     *	 1 1 1 0 0
-     *	 2 0 0 0 0
-     *	 2 0 0 0 0
-     *	 2 0 0 0 0
-     *	 2 0 0 0 0
-     *	 2 0 0 0 0
-     *	 2 0 0 0 0
-     *	 2 0 0 0 0
-     */
-    /*
-     * Close/release resources.
-     */
-    H5Dclose(dataset);
-    H5Sclose(dataspace);
-    H5Sclose(filespace);
-    H5Fclose(file);
-
-    return 0;
-}     
-
- - - - -

  -

(Return to TOC) - - -

Example 6. Reading data.

-

This example shows how to read information from the chunked dataset written by Example 5. -

-
-/*  
- *   This example shows how to read data from a chunked dataset.
- *   We will read from the file created by h5_extend_write.c 
- */
- 
-#include "hdf5.h"
-
-#define FILE        "SDSextendible.h5"
-#define DATASETNAME "ExtendibleArray" 
-#define RANK         2
-#define RANKC        1
-#define NX           10
-#define NY           5 
-
-int
-main (void)
-{
-    hid_t       file;                        /* handles */
-    hid_t       dataset;  
-    hid_t       filespace;                   
-    hid_t       memspace;                  
-    hid_t       cparms;                   
-    hsize_t     dims[2];                     /* dataset and chunk dimensions*/ 
-    hsize_t     chunk_dims[2];
-    hsize_t     col_dims[1];
-    hsize_t     count[2];
-    hsize_t     offset[2];
-
-    herr_t      status, status_n;                             
-
-    int         data_out[NX][NY];  /* buffer for dataset to be read */
-    int         chunk_out[2][5];   /* buffer for chunk to be read */
-    int         column[10];        /* buffer for column to be read */
-    int         rank, rank_chunk;
-    hsize_t	i, j;
-    
-
- 
-    /*
-     * Open the file and the dataset.
-     */
-    file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
-    dataset = H5Dopen(file, DATASETNAME);
- 
-    /*
-     * Get dataset rank and dimension.
-     */
-
-    filespace = H5Dget_space(dataset);    /* Get filespace handle first. */
-    rank      = H5Sget_simple_extent_ndims(filespace);
-    status_n  = H5Sget_simple_extent_dims(filespace, dims, NULL);
-    printf("dataset rank %d, dimensions %lu x %lu\n",
-	   rank, (unsigned long)(dims[0]), (unsigned long)(dims[1]));
-
-    /*
-     * Get creation properties list.
-     */
-    cparms = H5Dget_create_plist(dataset); /* Get properties handle first. */
-
-    /* 
-     * Check if dataset is chunked.
-     */
-    if (H5D_CHUNKED == H5Pget_layout(cparms))  {
-
-	/*
-	 * Get chunking information: rank and dimensions
-	 */
-	rank_chunk = H5Pget_chunk(cparms, 2, chunk_dims);
-	printf("chunk rank %d, dimensions %lu x %lu\n", rank_chunk,
-	       (unsigned long)(chunk_dims[0]), (unsigned long)(chunk_dims[1]));
-    }
- 
-    /*
-     * Define the memory space to read dataset.
-     */
-    memspace = H5Screate_simple(RANK,dims,NULL);
- 
-    /*
-     * Read dataset back and display.
-     */
-    status = H5Dread(dataset, H5T_NATIVE_INT, memspace, filespace,
-		     H5P_DEFAULT, data_out);
-    printf("\n");
-    printf("Dataset: \n");
-    for (j = 0; j < dims[0]; j++) {
-	for (i = 0; i < dims[1]; i++) printf("%d ", data_out[j][i]);
-	printf("\n");
-    }     
-
-    /*
-     *	    dataset rank 2, dimensions 10 x 5 
-     *	    chunk rank 2, dimensions 2 x 5 
-
-     *	    Dataset:
-     *	    1 1 1 3 3 
-     *	    1 1 1 3 3 
-     *	    1 1 1 0 0 
-     *	    2 0 0 0 0 
-     *	    2 0 0 0 0 
-     *	    2 0 0 0 0 
-     *	    2 0 0 0 0 
-     *	    2 0 0 0 0 
-     *	    2 0 0 0 0 
-     *	    2 0 0 0 0 
-     */
-
-    /*
-     * Read the third column from the dataset.
-     * First define memory dataspace, then define hyperslab
-     * and read it into column array.
-     */
-    col_dims[0] = 10;
-    memspace =  H5Screate_simple(RANKC, col_dims, NULL);
-
-    /*
-     * Define the column (hyperslab) to read.
-     */
-    offset[0] = 0;
-    offset[1] = 2;
-    count[0]  = 10;
-    count[1]  = 1;
-    status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL,
-				 count, NULL);
-    status = H5Dread(dataset, H5T_NATIVE_INT, memspace, filespace,
-		     H5P_DEFAULT, column);
-    printf("\n");
-    printf("Third column: \n");
-    for (i = 0; i < 10; i++) {
-	printf("%d \n", column[i]);
-    }
-
-    /*
-     *	    Third column: 
-     *	    1 
-     *	    1 
-     *	    1 
-     *	    0 
-     *	    0 
-     *	    0 
-     *	    0 
-     *	    0 
-     *	    0 
-     *	    0 
-     */
-
-    /*
-     * Define the memory space to read a chunk.
-     */
-    memspace = H5Screate_simple(rank_chunk,chunk_dims,NULL);
-
-    /*
-     * Define chunk in the file (hyperslab) to read.
-     */
-    offset[0] = 2;
-    offset[1] = 0;
-    count[0]  = chunk_dims[0];
-    count[1]  = chunk_dims[1];
-    status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, 
-				 count, NULL);
-
-    /*
-     * Read chunk back and display.
-     */
-    status = H5Dread(dataset, H5T_NATIVE_INT, memspace, filespace,
-		     H5P_DEFAULT, chunk_out);
-    printf("\n");
-    printf("Chunk: \n");
-    for (j = 0; j < chunk_dims[0]; j++) {
-	for (i = 0; i < chunk_dims[1]; i++) printf("%d ", chunk_out[j][i]);
-	printf("\n");
-    }     
-    /*
-     *	 Chunk: 
-     *	 1 1 1 0 0 
-     *	 2 0 0 0 0 
-     */
-
-    /*
-     * Close/release resources.
-     */
-    H5Pclose(cparms);
-    H5Dclose(dataset);
-    H5Sclose(filespace);
-    H5Sclose(memspace);
-    H5Fclose(file);
-
-    return 0;
-}
-
-
- - - - -

  -

(Return to TOC) - - -

Example 7. Creating groups.

-

This example shows how to create and access a group in an -HDF5 file and to place a dataset within this group. -It also illustrates the usage of the H5Giterate, -H5Glink, and H5Gunlink functions. - -

-
-/*
- * This example creates a group in the file and dataset in the group. 
- * Hard link to the group object is created and the dataset is accessed
- * under different names. 
- * Iterator function is used to find the object names in the root group.
- */ 
-
-
-#include "hdf5.h"
-
-
-#define FILE    "group.h5"
-#define RANK    2
-
- 
-herr_t file_info(hid_t loc_id, const char *name, void *opdata);
-                                     /* Operator function */
-int
-main(void)
-{
-
-    hid_t    file;
-    hid_t    grp;
-    hid_t    dataset, dataspace;
-    hid_t    plist; 
-
-    herr_t   status;
-    hsize_t  dims[2];
-    hsize_t  cdims[2];
- 
-    int      idx;
-
-    /*
-     * Create a file.
-     */
-    file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /*
-     * Create a group in the file. 
-     */
-    grp = H5Gcreate(file, "/Data", 0);
-
-    /*
-     * Create dataset "Compressed Data" in the group using absolute
-     * name. Dataset creation property list is modified to use 
-     * GZIP compression with the compression effort set to 6. 
-     * Note that compression can be used only when dataset is chunked. 
-     */
-    dims[0] = 1000;
-    dims[1] = 20;
-    cdims[0] = 20;
-    cdims[1] = 20;
-    dataspace = H5Screate_simple(RANK, dims, NULL);
-    plist     = H5Pcreate(H5P_DATASET_CREATE);
-                H5Pset_chunk(plist, 2, cdims);
-                H5Pset_deflate( plist, 6); 
-    dataset = H5Dcreate(file, "/Data/Compressed_Data", H5T_NATIVE_INT, 
-                        dataspace, plist); 
- 
-    /* 
-     * Close the dataset and the file.
-     */
-    H5Sclose(dataspace);
-    H5Dclose(dataset);
-    H5Fclose(file);
-
-    /*
-     * Now reopen the file and group in the file. 
-     */
-    file = H5Fopen(FILE, H5F_ACC_RDWR, H5P_DEFAULT);
-    grp  = H5Gopen(file, "Data");
-
-    /* 
-     * Access "Compressed_Data" dataset in the group. 
-     */
-    dataset = H5Dopen(grp, "Compressed_Data");
-    if( dataset < 0) printf(" Dataset is not found. \n");
-    printf("\"/Data/Compressed_Data\" dataset is open \n");
-
-    /*
-     * Close the dataset.
-     */
-    status = H5Dclose(dataset);
-
-    /*
-     * Create hard link to the Data group.
-     */
-    status = H5Glink(file, H5G_LINK_HARD, "Data", "Data_new");
-
-    /* 
-     * We can access "Compressed_Data" dataset using created
-     * hard link "Data_new". 
-     */
-    dataset = H5Dopen(file, "/Data_new/Compressed_Data");
-    if( dataset < 0) printf(" Dataset is not found. \n");
-    printf("\"/Data_new/Compressed_Data\" dataset is open \n");
-
-    /*
-     * Close the dataset.
-     */
-    status = H5Dclose(dataset);
-
-    /* 
-     * Use iterator to see the names of the objects in the file
-     * root directory.
-     */
-    idx = H5Giterate(file, "/", NULL, file_info, NULL);
-
-    /*
-     * Unlink  name "Data" and use iterator to see the names
-     * of the objects in the file root directory.
-     */
-    if (H5Gunlink(file, "Data") < 0)  
-      printf(" H5Gunlink failed \n");
-    else  
-      printf("\"Data\" is unlinked \n");
-
-    idx = H5Giterate(file, "/", NULL, file_info, NULL);
-    
-
-    /*
-     * Close the file.
-     */
-     
-    status = H5Fclose(file);
-
-    return 0;
-}
-/*
- * Operator function.
- */
-herr_t
-file_info(hid_t loc_id, const char *name, void *opdata)
-{
-    hid_t grp;
-    /*
-     * Open the group using its name.
-     */
-    grp = H5Gopen(loc_id, name);
- 
-    /*
-     * Display group name.
-     */
-    printf("\n");
-    printf("Name : ");
-    puts(name);
-    
-    H5Gclose(grp);
-    return 0;
- }
-
- - - - -

  -

(Return to TOC) - - -

Example 8. Writing and reading attributes.

-

This example shows how to create HDF5 attributes, to attach them to a dataset, and to read through all of the attributes of a dataset. - -

-
-/* 
- *  This program illustrates the usage of the H5A Interface functions.
- *  It creates and writes a dataset, and then creates and writes array,
- *  scalar, and string attributes of the dataset. 
- *  Program reopens the file, attaches to the scalar attribute using
- *  attribute name and reads and displays its value. Then index of the
- *  third attribute is used to read and display attribute values.
- *  The H5Aiterate function is used to iterate through the dataset attributes,
- *  and display their names. The function also reads and displays the values 
- *  of the array attribute. 
- */ 
- 
-#include <stdlib.h>
-#include <hdf5.h>
-
-#define FILE "Attributes.h5"
-
-#define RANK  1   /* Rank and size of the dataset  */ 
-#define SIZE  7
-
-#define ARANK  2   /* Rank and dimension sizes of the first dataset attribute */
-#define ADIM1  2
-#define ADIM2  3 
-#define ANAME  "Float attribute"      /* Name of the array attribute */
-#define ANAMES "Character attribute" /* Name of the string attribute */
-
-herr_t attr_info(hid_t loc_id, const char *name, void *opdata); 
-                                     /* Operator function */
-
-int 
-main (void)
-{
-
-   hid_t   file, dataset;       /* File and dataset identifiers */
-   
-   hid_t   fid;                 /* Dataspace identifier */
-   hid_t   attr1, attr2, attr3; /* Attribute identifiers */
-   hid_t   attr;
-   hid_t   aid1, aid2, aid3;    /* Attribute dataspace identifiers */ 
-   hid_t   atype;               /* Attribute type */
-
-   hsize_t fdim[] = {SIZE};
-   hsize_t adim[] = {ADIM1, ADIM2};  /* Dimensions of the first attribute  */
-   
-   float matrix[ADIM1][ADIM2]; /* Attribute data */ 
-
-   herr_t  ret;                /* Return value */
-   uint    i,j;                /* Counters */
-   int     idx;                /* Attribute index */
-   char    string_out[80];     /* Buffer to read string attribute back */
-   int     point_out;          /* Buffer to read scalar attribute back */
-
-   /*
-    * Data initialization.
-    */
-   int vector[] = {1, 2, 3, 4, 5, 6, 7};  /* Dataset data */
-   int point = 1;                         /* Value of the scalar attribute */ 
-   char string[] = "ABCD";                /* Value of the string attribute */
-
-   
-   for (i=0; i < ADIM1; i++) {            /* Values of the array attribute */
-       for (j=0; j < ADIM2; j++)
-       matrix[i][j] = -1.;
-   }
-
-   /*
-    * Create a file.
-    */
-   file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-   /* 
-    * Create the dataspace for the dataset in the file.
-    */
-   fid = H5Screate(H5S_SIMPLE);
-   ret = H5Sset_extent_simple(fid, RANK, fdim, NULL);
-
-   /*
-    * Create the dataset in the file.
-    */
-   dataset = H5Dcreate(file, "Dataset", H5T_NATIVE_INT, fid, H5P_DEFAULT);
-
-   /*
-    * Write data to the dataset.
-    */
-   ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL , H5S_ALL, H5P_DEFAULT, vector);
-
-   /*
-    * Create dataspace for the first attribute. 
-    */
-   aid1 = H5Screate(H5S_SIMPLE);
-   ret  = H5Sset_extent_simple(aid1, ARANK, adim, NULL);
-
-   /*
-    * Create array attribute.
-    */
-   attr1 = H5Acreate(dataset, ANAME, H5T_NATIVE_FLOAT, aid1, H5P_DEFAULT);
-
-   /*
-    * Write array attribute.
-    */
-   ret = H5Awrite(attr1, H5T_NATIVE_FLOAT, matrix);
-
-   /*
-    * Create scalar attribute.
-    */
-   aid2  = H5Screate(H5S_SCALAR);
-   attr2 = H5Acreate(dataset, "Integer attribute", H5T_NATIVE_INT, aid2,
-                     H5P_DEFAULT);
-
-   /*
-    * Write scalar attribute.
-    */
-   ret = H5Awrite(attr2, H5T_NATIVE_INT, &point); 
-
-   /*
-    * Create string attribute.
-    */
-   aid3  = H5Screate(H5S_SCALAR);
-   atype = H5Tcopy(H5T_C_S1);
-           H5Tset_size(atype, 4);
-   attr3 = H5Acreate(dataset, ANAMES, atype, aid3, H5P_DEFAULT);
-
-   /*
-    * Write string attribute.
-    */
-   ret = H5Awrite(attr3, atype, string); 
-
-   /*
-    * Close attribute and file dataspaces.
-    */
-   ret = H5Sclose(aid1); 
-   ret = H5Sclose(aid2); 
-   ret = H5Sclose(aid3); 
-   ret = H5Sclose(fid); 
-
-   /*
-    * Close the attributes.
-    */ 
-   ret = H5Aclose(attr1);
-   ret = H5Aclose(attr2);
-   ret = H5Aclose(attr3);
- 
-   /*
-    * Close the dataset.
-    */
-   ret = H5Dclose(dataset);
-
-   /*
-    * Close the file.
-    */
-   ret = H5Fclose(file);
-
-   /*
-    * Reopen the file.
-    */
-   file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
-
-   /*
-    * Open the dataset.
-    */
-   dataset = H5Dopen(file,"Dataset");
-
-   /*
-    * Attach to the scalar attribute using attribute name, then read and 
-    * display its value.
-    */
-   attr = H5Aopen_name(dataset,"Integer attribute");
-   ret  = H5Aread(attr, H5T_NATIVE_INT, &point_out);
-   printf("The value of the attribute \"Integer attribute\" is %d \n", point_out); 
-   ret =  H5Aclose(attr);
-
-   /*
-    * Attach to the string attribute using its index, then read and display the value.
-    */
-   attr  = H5Aopen_idx(dataset, 2);
-   atype = H5Tcopy(H5T_C_S1);
-           H5Tset_size(atype, 4);
-   ret   = H5Aread(attr, atype, string_out);
-   printf("The value of the attribute with the index 2 is %s \n", string_out);
-   ret   = H5Aclose(attr);
-   ret   = H5Tclose(atype);
-
-   /*
-    * Get attribute info using iteration function. 
-    */
-   idx = H5Aiterate(dataset, NULL, attr_info, NULL);
-
-   /*
-    * Close the dataset and the file.
-    */
-   H5Dclose(dataset);
-   H5Fclose(file);
-
-   return 0;  
-}
-
-/*
- * Operator function.
- */
-herr_t 
-attr_info(hid_t loc_id, const char *name, void *opdata)
-{
-    hid_t attr, atype, aspace;  /* Attribute, datatype and dataspace identifiers */
-    int   rank;
-    hsize_t sdim[64]; 
-    herr_t ret;
-    int i;
-    size_t npoints;             /* Number of elements in the array attribute. */ 
-    float *float_array;         /* Pointer to the array attribute. */
-    /*
-     * Open the attribute using its name.
-     */    
-    attr = H5Aopen_name(loc_id, name);
-
-    /*
-     * Display attribute name.
-     */
-    printf("\n");
-    printf("Name : ");
-    puts(name);
-
-    /* 
-     * Get attribute datatype, dataspace, rank, and dimensions.
-     */
-    atype  = H5Aget_type(attr);
-    aspace = H5Aget_space(attr);
-    rank = H5Sget_simple_extent_ndims(aspace);
-    ret = H5Sget_simple_extent_dims(aspace, sdim, NULL);
-
-    /*
-     *  Display rank and dimension sizes for the array attribute.
-     */
-
-    if(rank > 0) {
-    printf("Rank : %d \n", rank); 
-    printf("Dimension sizes : ");
-    for (i=0; i< rank; i++) printf("%d ", (int)sdim[i]);
-    printf("\n");
-    }
-
-    /*
-     * Read array attribute and display its type and values.
-     */
-
-    if (H5T_FLOAT == H5Tget_class(atype)) {
-    printf("Type : FLOAT \n"); 
-    npoints = H5Sget_simple_extent_npoints(aspace);
-    float_array = (float *)malloc(sizeof(float)*(int)npoints); 
-    ret = H5Aread(attr, atype, float_array);
-    printf("Values : ");
-    for( i = 0; i < (int)npoints; i++) printf("%f ", float_array[i]); 
-    printf("\n");
-    free(float_array);
-    }
-
-    /*
-     * Release all identifiers.
-     */
-    H5Tclose(atype);
-    H5Sclose(aspace);
-    H5Aclose(attr);
-
-    return 0;
-}
-
- - - -

  -

(Return to TOC) - - -

Example 9. Creating and storing references to objects.

-This example creates a group, two datasets within that group, and a named datatype in the group. References to these four objects are stored in a dataset in the root group. -
-
-#include <hdf5.h>
-
-#define FILE1   "trefer1.h5"
-
-/* 1-D dataset with fixed dimensions */
-#define SPACE1_NAME  "Space1"
-#define SPACE1_RANK	1
-#define SPACE1_DIM1	4
-
-/* 2-D dataset with fixed dimensions */
-#define SPACE2_NAME  "Space2"
-#define SPACE2_RANK	2
-#define SPACE2_DIM1	10
-#define SPACE2_DIM2	10
-
-int 
-main(void) {
-    hid_t		fid1;		/* HDF5 File IDs		*/
-    hid_t		dataset;	/* Dataset ID			*/
-    hid_t		group;      /* Group ID             */
-    hid_t		sid1;       /* Dataspace ID			*/
-    hid_t		tid1;       /* Datatype ID			*/
-    hsize_t		dims1[] = {SPACE1_DIM1};
-    hobj_ref_t      *wbuf;      /* buffer to write to disk */
-    int       *tu32;      /* Temporary pointer to int data */
-    int        i;          /* counting variables */
-    const char *write_comment="Foo!"; /* Comments for group */
-    herr_t		ret;		/* Generic return value		*/
-
-/* Compound datatype */
-typedef struct s1_t {
-    unsigned int a;
-    unsigned int b;
-    float c;
-} s1_t;
-
-    /* Allocate write buffers */
-    wbuf=(hobj_ref_t *)malloc(sizeof(hobj_ref_t)*SPACE1_DIM1);
-    tu32=malloc(sizeof(int)*SPACE1_DIM1);
-
-    /* Create file */
-    fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /* Create dataspace for datasets */
-    sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
-
-    /* Create a group */
-    group=H5Gcreate(fid1,"Group1",-1);
-
-    /* Set group's comment */
-    ret=H5Gset_comment(group,".",write_comment);
-
-    /* Create a dataset (inside Group1) */
-    dataset=H5Dcreate(group,"Dataset1",H5T_STD_U32LE,sid1,H5P_DEFAULT);
-
-    for(i=0; i < SPACE1_DIM1; i++)
-        tu32[i] = i*3;
-
-    /* Write selection to disk */
-    ret=H5Dwrite(dataset,H5T_NATIVE_INT,H5S_ALL,H5S_ALL,H5P_DEFAULT,tu32);
-
-    /* Close Dataset */
-    ret = H5Dclose(dataset);
-
-    /* Create another dataset (inside Group1) */
-    dataset=H5Dcreate(group,"Dataset2",H5T_NATIVE_UCHAR,sid1,H5P_DEFAULT);
-
-    /* Close Dataset */
-    ret = H5Dclose(dataset);
-
-    /* Create a datatype to refer to */
-    tid1 = H5Tcreate (H5T_COMPOUND, sizeof(s1_t));
-
-    /* Insert fields */
-    ret=H5Tinsert (tid1, "a", HOFFSET(s1_t,a), H5T_NATIVE_INT);
-
-    ret=H5Tinsert (tid1, "b", HOFFSET(s1_t,b), H5T_NATIVE_INT);
-
-    ret=H5Tinsert (tid1, "c", HOFFSET(s1_t,c), H5T_NATIVE_FLOAT);
-
-    /* Save datatype for later */
-    ret=H5Tcommit (group, "Datatype1", tid1);
-
-    /* Close datatype */
-    ret = H5Tclose(tid1);
-
-    /* Close group */
-    ret = H5Gclose(group);
-
-    /* Create a dataset to store references */
-    dataset=H5Dcreate(fid1,"Dataset3",H5T_STD_REF_OBJ,sid1,H5P_DEFAULT);
-
-    /* Create reference to dataset */
-    ret = H5Rcreate(&wbuf[0],fid1,"/Group1/Dataset1",H5R_OBJECT,-1);
-
-    /* Create reference to dataset */
-    ret = H5Rcreate(&wbuf[1],fid1,"/Group1/Dataset2",H5R_OBJECT,-1);
-
-    /* Create reference to group */
-    ret = H5Rcreate(&wbuf[2],fid1,"/Group1",H5R_OBJECT,-1);
-
-    /* Create reference to named datatype */
-    ret = H5Rcreate(&wbuf[3],fid1,"/Group1/Datatype1",H5R_OBJECT,-1);
-
-    /* Write selection to disk */
-    ret=H5Dwrite(dataset,H5T_STD_REF_OBJ,H5S_ALL,H5S_ALL,H5P_DEFAULT,wbuf);
-
-    /* Close disk dataspace */
-    ret = H5Sclose(sid1);
-    
-    /* Close Dataset */
-    ret = H5Dclose(dataset);
-
-    /* Close file */
-    ret = H5Fclose(fid1);
-    free(wbuf);
-    free(tu32);
-    return 0;
-}
-
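-For brevity the example stores every status value in ret without checking it. In
-real code each HDF5 call should be tested. A minimal sketch of the pattern, shown
-for one of the H5Rcreate calls above (and assuming <stdio.h> is available, as the
-example's own printf calls already require):
-
-    if ((ret = H5Rcreate(&wbuf[0], fid1, "/Group1/Dataset1",
-                         H5R_OBJECT, -1)) < 0) {
-        fprintf(stderr, "H5Rcreate failed for /Group1/Dataset1\n");
-        H5Fclose(fid1);
-        return 1;
-    }
-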
-

Example 10. Reading references to objects.

-This example opens and reads dataset Dataset3 from the file created in Example 9. The program then dereferences the stored references to dataset Dataset1, to the group, and to the named datatype, and opens those objects. It reads and displays the dataset's data, the group's comment, and the number of members of the compound datatype.
-
-#include <stdlib.h>
-#include <hdf5.h>
-
-#define FILE1   "trefer1.h5"
-
-/* dataset with fixed dimensions */
-#define SPACE1_NAME  "Space1"
-#define SPACE1_RANK	1
-#define SPACE1_DIM1	4
-
-int 
-main(void)
-{
-    hid_t		fid1;		/* HDF5 File IDs		*/
-    hid_t		dataset,	/* Dataset ID			*/
-                dset2;      /* Dereferenced dataset ID */
-    hid_t		group;      /* Group ID             */
-    hid_t		sid1;       /* Dataspace ID			*/
-    hid_t		tid1;       /* Datatype ID			*/
-    hobj_ref_t      *rbuf;      /* buffer to read from disk */
-    int                *tu32;      /* temp. buffer read from disk */
-    int        i;          /* counting variables */
-    char read_comment[10];
-    herr_t		ret;		/* Generic return value		*/
-
-    /* Allocate read buffers */
-    rbuf = malloc(sizeof(hobj_ref_t)*SPACE1_DIM1);
-    tu32 = malloc(sizeof(int)*SPACE1_DIM1);
-
-    /* Open the file */
-    fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
-
-    /* Open the dataset */
-    dataset=H5Dopen(fid1,"/Dataset3");
-
-    /* Read selection from disk */
-    ret=H5Dread(dataset,H5T_STD_REF_OBJ,H5S_ALL,H5S_ALL,H5P_DEFAULT,rbuf);
-
-    /* Open dataset object */
-    dset2 = H5Rdereference(dataset,H5R_OBJECT,&rbuf[0]);
-
-    /* Check information in referenced dataset */
-    sid1 = H5Dget_space(dset2);
-
-    ret=H5Sget_simple_extent_npoints(sid1);
-
-    /* Read from disk */
-    ret=H5Dread(dset2,H5T_NATIVE_INT,H5S_ALL,H5S_ALL,H5P_DEFAULT,tu32);
-    printf("Dataset data : \n");
-     for (i=0; i < SPACE1_DIM1 ; i++) printf (" %d ", tu32[i]);
-    printf("\n");
-    printf("\n");
-
-    /* Close dereferenced Dataset */
-    ret = H5Dclose(dset2);
-
-    /* Open group object */
-    group = H5Rdereference(dataset,H5R_OBJECT,&rbuf[2]);
-
-    /* Get group's comment */
-    ret=H5Gget_comment(group,".",10,read_comment);
-    printf("Group comment is %s \n", read_comment);
-    printf(" \n");
-    /* Close group */
-    ret = H5Gclose(group);
-
-    /* Open datatype object */
-    tid1 = H5Rdereference(dataset,H5R_OBJECT,&rbuf[3]);
-
-    /* Verify correct datatype */
-    {
-        H5T_class_t tclass;
-
-        tclass= H5Tget_class(tid1);
-        if ((tclass == H5T_COMPOUND))
-           printf ("Number of compound datatype members is %d \n", H5Tget_nmembers(tid1)); 
-    printf(" \n");
-    }
-
-    /* Close datatype */
-    ret = H5Tclose(tid1);
-
-    /* Close Dataset */
-    ret = H5Dclose(dataset);
-
-    /* Close file */
-    ret = H5Fclose(fid1);
-
-    /* Free memory buffers */
-    free(rbuf);
-    free(tu32);
-    return 0;
-}   
-
-
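-Before dereferencing, a program can also ask what kind of object a stored
-reference points to. A minimal sketch, assuming the 1.6-era H5Rget_obj_type()
-interface (the helper name and its use here are hypothetical):
-
-    static void
-    print_ref_types(hid_t dset, hobj_ref_t *refs, int n)
-    {
-        int i;
-
-        for (i = 0; i < n; i++) {
-            /* Query the type of the object each reference points to */
-            switch (H5Rget_obj_type(dset, H5R_OBJECT, &refs[i])) {
-                case H5G_DATASET: printf("refs[%d]: dataset\n", i);        break;
-                case H5G_GROUP:   printf("refs[%d]: group\n", i);          break;
-                case H5G_TYPE:    printf("refs[%d]: named datatype\n", i); break;
-                default:          printf("refs[%d]: unknown\n", i);        break;
-            }
-        }
-    }
-
-Such a helper could be called as print_ref_types(dataset, rbuf, SPACE1_DIM1)
-right after the H5Dread call in the example.
-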

Example 11. Creating and writing a reference to a region.

-This example creates a dataset in the file and then a second dataset for storing references to regions (selections) of the first. The first selection is a 6 x 6 hyperslab; the second is a point selection in the same dataset. References to both selections are created, stored in a buffer, and written to the reference dataset in the file.
-#include <stdlib.h>
-#include <hdf5.h>
-
-#define FILE2	"trefer2.h5"
-#define SPACE1_NAME  "Space1"
-#define SPACE1_RANK     1
-#define SPACE1_DIM1     4
-
-/* Dataset with fixed dimensions */
-#define SPACE2_NAME  "Space2"
-#define SPACE2_RANK	2
-#define SPACE2_DIM1	10
-#define SPACE2_DIM2	10
-
-/* Element selection information */
-#define POINT1_NPOINTS 10
-
-int
-main(void)
-{
-    hid_t	fid1;		/* HDF5 File IDs		*/
-    hid_t	dset1,		/* Dataset ID			*/
-                dset2;      /* Dereferenced dataset ID */
-    hid_t	sid1,       /* Dataspace ID	#1		*/
-                sid2;       /* Dataspace ID	#2		*/
-    hsize_t	dims1[] = {SPACE1_DIM1},
-            	dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
-    hsize_t	start[SPACE2_RANK];     /* Starting location of hyperslab */
-    hsize_t	stride[SPACE2_RANK];    /* Stride of hyperslab */
-    hsize_t	count[SPACE2_RANK];     /* Element count of hyperslab */
-    hsize_t	block[SPACE2_RANK];     /* Block size of hyperslab */
-    hsize_t	coord1[POINT1_NPOINTS][SPACE2_RANK]; 
-                                    /* Coordinates for point selection */
-    hdset_reg_ref_t      *wbuf;      /* buffer to write to disk */
-    int     *dwbuf;      /* Buffer for writing numeric data to disk */
-    int        i;          /* counting variables */
-    herr_t		ret;		/* Generic return value		*/
-
-
-    /* Allocate write & read buffers */
-    wbuf=calloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1);
-    dwbuf=malloc(sizeof(int)*SPACE2_DIM1*SPACE2_DIM2);
-
-    /* Create file */
-    fid1 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
-    /* Create dataspace for datasets */
-    sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
-
-    /* Create a dataset */
-    dset2=H5Dcreate(fid1,"Dataset2",H5T_STD_U8LE,sid2,H5P_DEFAULT);
-
-    for(i=0; i < SPACE2_DIM1*SPACE2_DIM2; i++)
-        dwbuf[i]=i*3;
-
-    /* Write selection to disk */
-    ret=H5Dwrite(dset2,H5T_NATIVE_INT,H5S_ALL,H5S_ALL,H5P_DEFAULT,dwbuf);
-
-    /* Close Dataset */
-    ret = H5Dclose(dset2);
-
-    /* Create dataspace for the reference dataset */
-    sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
-
-    /* Create a dataset */
-    dset1=H5Dcreate(fid1,"Dataset1",H5T_STD_REF_DSETREG,sid1,H5P_DEFAULT);
-
-    /* Create references */
-
-    /* Select 6x6 hyperslab for first reference */
-    start[0]=2; start[1]=2;
-    stride[0]=1; stride[1]=1;
-    count[0]=6; count[1]=6;
-    block[0]=1; block[1]=1;
-    ret = H5Sselect_hyperslab(sid2,H5S_SELECT_SET,start,stride,count,block);
-
-    /* Store first dataset region */
-    ret = H5Rcreate(&wbuf[0],fid1,"/Dataset2",H5R_DATASET_REGION,sid2);
-
-    /* Select sequence of ten points for second reference */
-    coord1[0][0]=6; coord1[0][1]=9;
-    coord1[1][0]=2; coord1[1][1]=2;
-    coord1[2][0]=8; coord1[2][1]=4;
-    coord1[3][0]=1; coord1[3][1]=6;
-    coord1[4][0]=2; coord1[4][1]=8;
-    coord1[5][0]=3; coord1[5][1]=2;
-    coord1[6][0]=0; coord1[6][1]=4;
-    coord1[7][0]=9; coord1[7][1]=0;
-    coord1[8][0]=7; coord1[8][1]=1;
-    coord1[9][0]=3; coord1[9][1]=3;
-    ret = H5Sselect_elements(sid2,H5S_SELECT_SET,POINT1_NPOINTS,(const hsize_t **)coord1);
-
-    /* Store second dataset region */
-    ret = H5Rcreate(&wbuf[1],fid1,"/Dataset2",H5R_DATASET_REGION,sid2);
-
-    /* Write selection to disk */
-    ret=H5Dwrite(dset1,H5T_STD_REF_DSETREG,H5S_ALL,H5S_ALL,H5P_DEFAULT,wbuf);
-
-    /* Close all objects */
-    ret = H5Sclose(sid1);
-    ret = H5Dclose(dset1);
-    ret = H5Sclose(sid2);
-    
-    /* Close file */
-    ret = H5Fclose(fid1);
-
-    free(wbuf);
-    free(dwbuf);
-    return 0;
-}   
-
-
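-With start = (2,2), count = (6,6), and stride = block = 1, the hyperslab above
-covers rows 2-7 and columns 2-7 of Dataset2. A minimal sketch of a sanity check
-(hypothetical; it would be placed right after the H5Sselect_hyperslab call) uses
-H5Sget_select_bounds:
-
-    {
-        hsize_t lo[SPACE2_RANK], hi[SPACE2_RANK];
-
-        if (H5Sget_select_bounds(sid2, lo, hi) >= 0)
-            printf("hyperslab bounds: (%lu,%lu) to (%lu,%lu)\n",
-                   (unsigned long)lo[0], (unsigned long)lo[1],
-                   (unsigned long)hi[0], (unsigned long)hi[1]);
-        /* expected output: (2,2) to (7,7) */
-    }
-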

Example 12. Reading a reference to a region.

-This example reads the dataset of dataset region references created in Example 11. It reads data from the dereferenced dataset and displays the number of elements and the raw data. It then retrieves the two stored selections, a hyperslab selection and a point selection. The program queries the number of elements in the hyperslab and the coordinates of its blocks and displays them; it then queries the number of selected points and their coordinates and displays that information.
-   
-#include <stdlib.h>
-#include <hdf5.h>
-
-#define FILE2	"trefer2.h5"
-#define NPOINTS 10
- 
-/* 1-D dataset with fixed dimensions */
-#define SPACE1_NAME  "Space1"
-#define SPACE1_RANK	1
-#define SPACE1_DIM1	4
-
-/* 2-D dataset with fixed dimensions */
-#define SPACE2_NAME  "Space2"
-#define SPACE2_RANK	2
-#define SPACE2_DIM1	10
-#define SPACE2_DIM2	10
-
-int 
-main(void)
-{
-    hid_t		fid1;		/* HDF5 File IDs		*/
-    hid_t		dset1,	/* Dataset ID			*/
-                dset2;      /* Dereferenced dataset ID */
-    hid_t		sid1,       /* Dataspace ID	#1		*/
-                sid2;       /* Dataspace ID	#2		*/
-    hsize_t *   coords;             /* Coordinate buffer */
-    hsize_t		low[SPACE2_RANK];   /* Selection bounds */
-    hsize_t		high[SPACE2_RANK];     /* Selection bounds */
-    hdset_reg_ref_t      *rbuf;      /* buffer to read from disk */
-    int    *drbuf;      /* Buffer for reading numeric data from disk */
-    int        i, j;          /* counting variables */
-    herr_t		ret;		/* Generic return value		*/
-
-    /* Output message about test being performed */
-
-    /* Allocate write & read buffers */
-    rbuf=malloc(sizeof(hdset_reg_ref_t)*SPACE1_DIM1);
-    drbuf=calloc(sizeof(int),SPACE2_DIM1*SPACE2_DIM2);
-
-    /* Open the file */
-    fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT);
-
-    /* Open the dataset */
-    dset1=H5Dopen(fid1,"/Dataset1");
-
-    /* Read selection from disk */
-    ret=H5Dread(dset1,H5T_STD_REF_DSETREG,H5S_ALL,H5S_ALL,H5P_DEFAULT,rbuf);
-
-    /* Try to open objects */
-    dset2 = H5Rdereference(dset1,H5R_DATASET_REGION,&rbuf[0]);
-
-    /* Check information in referenced dataset */
-    sid1 = H5Dget_space(dset2);
-
-    ret=H5Sget_simple_extent_npoints(sid1);
-    printf(" Number of elements in the dataset is : %d\n",ret);
-
-    /* Read from disk */
-    ret=H5Dread(dset2,H5T_NATIVE_INT,H5S_ALL,H5S_ALL,H5P_DEFAULT,drbuf);
-
-    for(i=0; i < SPACE2_DIM1; i++) {
-        for (j=0; j < SPACE2_DIM2; j++) printf (" %d ", drbuf[i*SPACE2_DIM2+j]);
-        printf("\n"); }
-
-    /* Get the hyperslab selection */
-    sid2=H5Rget_region(dset1,H5R_DATASET_REGION,&rbuf[0]);
-
-    /* Verify correct hyperslab selected */
-    ret = H5Sget_select_npoints(sid2);
-    printf(" Number of elements in the hyperslab is : %d \n", ret);
-    ret = H5Sget_select_hyper_nblocks(sid2);
-    coords=malloc(ret*SPACE2_RANK*sizeof(hsize_t)*2); /* allocate space for the hyperslab blocks */
-    ret = H5Sget_select_hyper_blocklist(sid2,0,ret,coords);
-    printf(" Hyperslab coordinates are : \n");
-    printf (" ( %lu , %lu ) ( %lu , %lu ) \n", \
-(unsigned long)coords[0],(unsigned long)coords[1],(unsigned long)coords[2],(unsigned long)coords[3]); 
-    free(coords);
-    ret = H5Sget_select_bounds(sid2,low,high);
-
-    /* Close region space */
-    ret = H5Sclose(sid2);
-
-    /* Get the element selection */
-    sid2=H5Rget_region(dset1,H5R_DATASET_REGION,&rbuf[1]);
-
-    /* Verify correct elements selected */
-    ret = H5Sget_select_elem_npoints(sid2);
-    printf(" Number of selected elements is : %d\n", ret);
-
-    /* Allocate space for the element points */
-    coords= malloc(ret*SPACE2_RANK*sizeof(hsize_t)); 
-    ret = H5Sget_select_elem_pointlist(sid2,0,ret,coords);
-    printf(" Coordinates of selected elements are : \n");
-    for (i=0; i < 2*NPOINTS; i=i+2) 
-         printf(" ( %lu , %lu ) \n", (unsigned long)coords[i],(unsigned long)coords[i+1]); 
-          
-    free(coords);
-    ret = H5Sget_select_bounds(sid2,low,high);
-
-    /* Close region space */
-    ret = H5Sclose(sid2);
-
-    /* Close first space */
-    ret = H5Sclose(sid1);
-
-    /* Close dereferenced Dataset */
-    ret = H5Dclose(dset2);
-
-    /* Close Dataset */
-    ret = H5Dclose(dset1);
-
-    /* Close file */
-    ret = H5Fclose(fid1);
-
-    /* Free memory buffers */
-    free(rbuf);
-    free(drbuf);
-    return 0;
-}   
-
-
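-The coordinate buffer filled by H5Sget_select_hyper_blocklist holds, for every
-block, the block's starting corner followed by its opposite corner, i.e.
-2 * rank values per block; that is why the example prints coords[0]..coords[3]
-as two (row, column) pairs, which for the 6 x 6 hyperslab are (2,2) and (7,7).
-A minimal sketch of a generic printer for rank-2 selections (a hypothetical
-helper, assuming the same headers the example already includes):
-
-    static void
-    print_blocks(hid_t space)
-    {
-        hssize_t nblocks = H5Sget_select_hyper_nblocks(space);
-        hsize_t *buf = malloc((size_t)nblocks * 2 * SPACE2_RANK * sizeof(hsize_t));
-        int i;
-
-        H5Sget_select_hyper_blocklist(space, 0, (hsize_t)nblocks, buf);
-        for (i = 0; i < (int)nblocks; i++)
-            printf("block %d: start (%lu,%lu)  end (%lu,%lu)\n", i,
-                   (unsigned long)buf[4*i],   (unsigned long)buf[4*i+1],
-                   (unsigned long)buf[4*i+2], (unsigned long)buf[4*i+3]);
-        free(buf);
-    }
-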


-
- - - - diff --git a/doc/html/Intro/Makefile.am b/doc/html/Intro/Makefile.am deleted file mode 100644 index 0cbc3a6..0000000 --- a/doc/html/Intro/Makefile.am +++ /dev/null @@ -1,17 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir=$(docdir)/hdf5/Intro - -# Public doc files (to be installed)... -localdoc_DATA=IntroExamples.html diff --git a/doc/html/Intro/Makefile.in b/doc/html/Intro/Makefile.in deleted file mode 100644 index 5c71f3c..0000000 --- a/doc/html/Intro/Makefile.in +++ /dev/null @@ -1,485 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/Intro -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/Intro - -# Public doc files (to be installed)... 
-localdoc_DATA = IntroExamples.html -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/Intro/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/Intro/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am 
-install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/Lib_Maint.html b/doc/html/Lib_Maint.html deleted file mode 100644 index bff638c..0000000 --- a/doc/html/Lib_Maint.html +++ /dev/null @@ -1,113 +0,0 @@ -
-Information for HDF5 maintainers:
-
-* You can run make from any directory.  However, running make in a
-  subdirectory only builds things in that directory and below.  All
-  makefiles do know when their target depends on something outside
-  the local directory tree:
-
-	$ cd test
-	$ make
-	make: *** No rule to make target ../src/libhdf5.a
-
-* All Makefiles understand the following targets:
-
-        all              -- build locally.
-        install          -- install libs, headers, progs.
-        uninstall        -- remove installed files.
-        mostlyclean      -- remove temp files (eg, *.o but not *.a).
-        clean            -- mostlyclean plus libs and progs.
-        distclean        -- all non-distributed files.
-        maintainer-clean -- all derived files but H5config.h.in and configure.
-
-* Most Makefiles also understand:
-
-	TAGS		-- build a tags table
-	dep, depend	-- recalculate source dependencies
-	lib		-- build just the libraries w/o programs
-
-* If you have personal preferences for which make, compiler, compiler
-  flags, preprocessor flags, etc., that you use and you don't want to
-  set environment variables, then use a site configuration file.
-
-  When configure starts, it looks in the config directory for files
-  whose name is some combination of the CPU name, vendor, and
-  operating system in this order:
-
-	CPU-VENDOR-OS
-	VENDOR-OS
-	CPU-VENDOR
-	OS
-	VENDOR
-	CPU
-
-  The first file which is found is sourced and can therefore affect
-  the behavior of the rest of configure. See config/BlankForm for the
-  template.
-
-* If you use GNU make along with gcc the Makefile will contain targets
-  that automatically maintain a list of source interdependencies; you
-  seldom have to say `make clean'.  I say `seldom' because if you
-  change how one `*.h' file includes other `*.h' files you'll have
-  to force an update.
-
-  To force an update of all dependency information remove the
-  `.depend' file from each directory and type `make'.  For
-  instance:
-
-	$ cd $HDF5_HOME
-	$ find . -name .depend -exec rm {} \;
-	$ make
-
-  If you're not using GNU make and gcc then dependencies come from
-  ".distdep" files in each directory.  Those files are generated on
-  GNU systems and inserted into the Makefile's by running
-  config.status (which happens near the end of configure).
-
-* If you use GNU make along with gcc then the Perl script `trace' is
-  run just before dependencies are calculated to update any H5TRACE()
-  calls that might appear in the file.  Otherwise, after changing the
-  type of a function (return type or argument types) one should run
-  `trace' manually on those source files (e.g., ../bin/trace *.c).
-
-* Object files stay in the directory and are added to the library as a
-  final step instead of placing the file in the library immediately
-  and removing it from the directory.  The reason is three-fold:
-
-	1.  Most versions of make don't allow `$(LIB)($(SRC:.c=.o))'
-	    which makes it necessary to have two lists of files, one
-	    that ends with `.c' and the other that has the library
-	    name wrapped around each `.o' file.
-
-	2.  Some versions of make/ar have problems with modification
-	    times of archive members.
-
-	3.  Adding object files immediately causes problems on SMP
-	    machines where make is doing more than one thing at a
-	    time.
-
-* When using GNU make on an SMP you can cause it to compile more than
-  one thing at a time.  At the top of the source tree invoke make as
-
-	$ make -j -l6
-
-  which causes make to fork as many children as possible as long as
-  the load average doesn't go above 6.  In subdirectories one can say
-
-	$ make -j2
-
-  which limits the number of children to two (this doesn't work at the
-  top level because the `-j2' is not passed to recursive makes).
-
-* To create a release tarball go to the top-level directory and run
-  ./bin/release.  You can optionally supply one or more of the words
-  `tar', `gzip', `bzip2' or `compress' on the command line.  The
-  result will be a (compressed) tar file(s) in the `releases'
-  directory.  The README file is updated to contain the release date
-  and version number.
-
-* To create a tarball of all the files which are part of HDF5 go to
-  the top-level directory and type:
-
-      tar cvf foo.tar `grep '^\.' MANIFEST |unexpand |cut -f1`
-
diff --git a/doc/html/Makefile.am b/doc/html/Makefile.am deleted file mode 100644 index 2d89255..0000000 --- a/doc/html/Makefile.am +++ /dev/null @@ -1,43 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# -# This is the top level makefile of the Doc directory. It mostly just -# reinvokes make in the various subdirectories. -# You can alternatively invoke make from each subdirectory manually. -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir=$(docdir)/hdf5 - -# Subdirectories in build-order -SUBDIRS=ADGuide Graphics Intro PSandPDF TechNotes Tutor \ - cpplus ed_libs ed_styles fortran - -# Public doc files (to be installed)... -localdoc_DATA=ADGuide.html Attributes.html Big.html Caching.html Chunk_f1.gif \ - Chunk_f2.gif Chunk_f3.gif Chunk_f4.gif Chunk_f5.gif Chunk_f6.gif \ - Chunking.html Coding.html Copyright.html Datasets.html \ - Dataspaces.html Datatypes.html DatatypesEnum.html Debugging.html \ - EnumMap.gif Environment.html Errors.html FF-IH_FileGroup.gif \ - FF-IH_FileObject.gif Files.html Filters.html Glossary.html \ - Groups.html H5.api_map.html H5.format.html H5.intro.html \ - H5.sample_code.html H5.user.PrintGen.html H5.user.PrintTpg.html \ - H5.user.html IH_map1.gif IH_map2.gif IH_map3.gif IH_map4.gif \ - IH_mapFoot.gif IH_mapHead.gif IOPipe.html MountingFiles.html \ - NCSAfooterlogo.gif Performance.html PredefDTypes.html \ - Properties.html RM_H5.html RM_H5A.html RM_H5D.html RM_H5E.html \ - RM_H5F.html RM_H5Front.html RM_H5G.html RM_H5I.html RM_H5P.html \ - RM_H5R.html RM_H5S.html RM_H5T.html RM_H5Z.html References.html \ - TechNotes.html Tools.html Version.html chunk1.gif compat.html \ - dataset_p1.gif ddl.html extern1.gif extern2.gif group_p1.gif \ - group_p2.gif group_p3.gif h5s.examples hdf2.jpg ph5design.html \ - ph5example.c ph5implement.txt pipe1.gif pipe2.gif pipe3.gif \ - pipe4.gif pipe5.gif index.html version.gif diff --git a/doc/html/Makefile.in b/doc/html/Makefile.in deleted file mode 100644 index 2f39b5f..0000000 --- a/doc/html/Makefile.in +++ /dev/null @@ -1,670 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# -# This is the top level makefile of the Doc directory. It mostly just -# reinvokes make in the various subdirectories. -# You can alternatively invoke make from each subdirectory manually. -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../.. 
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-exec-recursive install-info-recursive \ - install-recursive installcheck-recursive installdirs-recursive \ - pdf-recursive ps-recursive uninstall-info-recursive \ - uninstall-recursive -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5 - -# Subdirectories in build-order -SUBDIRS = ADGuide Graphics Intro PSandPDF TechNotes Tutor \ - cpplus ed_libs ed_styles fortran - - -# Public doc files (to be installed)... 
-localdoc_DATA = ADGuide.html Attributes.html Big.html Caching.html Chunk_f1.gif \ - Chunk_f2.gif Chunk_f3.gif Chunk_f4.gif Chunk_f5.gif Chunk_f6.gif \ - Chunking.html Coding.html Copyright.html Datasets.html \ - Dataspaces.html Datatypes.html DatatypesEnum.html Debugging.html \ - EnumMap.gif Environment.html Errors.html FF-IH_FileGroup.gif \ - FF-IH_FileObject.gif Files.html Filters.html Glossary.html \ - Groups.html H5.api_map.html H5.format.html H5.intro.html \ - H5.sample_code.html H5.user.PrintGen.html H5.user.PrintTpg.html \ - H5.user.html IH_map1.gif IH_map2.gif IH_map3.gif IH_map4.gif \ - IH_mapFoot.gif IH_mapHead.gif IOPipe.html MountingFiles.html \ - NCSAfooterlogo.gif Performance.html PredefDTypes.html \ - Properties.html RM_H5.html RM_H5A.html RM_H5D.html RM_H5E.html \ - RM_H5F.html RM_H5Front.html RM_H5G.html RM_H5I.html RM_H5P.html \ - RM_H5R.html RM_H5S.html RM_H5T.html RM_H5Z.html References.html \ - TechNotes.html Tools.html Version.html chunk1.gif compat.html \ - dataset_p1.gif ddl.html extern1.gif extern2.gif group_p1.gif \ - group_p2.gif group_p3.gif h5s.examples hdf2.jpg ph5design.html \ - ph5example.c ph5implement.txt pipe1.gif pipe2.gif pipe3.gif \ - pipe4.gif pipe5.gif index.html version.gif - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. 
-# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. -$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -mostlyclean-recursive clean-recursive distclean-recursive \ -maintainer-clean-recursive: - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(mkdir_p) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - 
-maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-recursive - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool \ - distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-recursive - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -uninstall-info: uninstall-info-recursive - -.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \ - clean clean-generic clean-libtool clean-recursive ctags \ - ctags-recursive distclean distclean-generic distclean-libtool \ - distclean-recursive distclean-tags distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-exec install-exec-am install-info \ - install-info-am install-localdocDATA install-man install-strip \ - installcheck installcheck-am installdirs installdirs-am \ - maintainer-clean maintainer-clean-generic \ - maintainer-clean-recursive mostlyclean mostlyclean-generic \ - mostlyclean-libtool mostlyclean-recursive pdf pdf-am ps ps-am \ - tags tags-recursive uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/MemoryManagement.html b/doc/html/MemoryManagement.html deleted file mode 100644 index c93dc10..0000000 --- a/doc/html/MemoryManagement.html +++ /dev/null @@ -1,510 +0,0 @@ - - - - Memory Management in HDF5 - - - -

Memory Management in HDF5

- - -

Is a Memory Manager Necessary?

- -

Some form of memory management may be necessary in HDF5 when - the various deletion operators are implemented so that the - file memory is not permanently orphaned. However, since an - HDF5 file was designed with persistent data in mind, the - importance of a memory manager is questionable. - -

On the other hand, when certain meta data containers (file glue) - grow, they may need to be relocated in order to keep the - container contiguous. - -

- Example: An object header consists of up to two - chunks of contiguous memory. The first chunk is a fixed - size at a fixed location when the header link count is - greater than one. Thus, inserting additional items into an - object header may require the second chunk to expand. When - this occurs, the second chunk may need to move to another - location in the file, freeing the file memory which that - chunk originally occupied. -
- -

The relocation of meta data containers could potentially - orphan a significant amount of file memory if the application - has made poor estimates for preallocation sizes. - - -

Levels of Memory Management

- -

Memory management by the library can be independent of memory - management support by the file format. The file format can - support no memory management, some memory management, or full - memory management. The same is true of the library. -

Support in the Library

- -
-
No Support: I -
When memory is deallocated it simply becomes unreferenced - (orphaned) in the file. Memory allocation requests are - satisfied by extending the file. - -
A separate off-line utility can be used to detect the - unreferenced bytes of a file and "bubble" them up to the end - of the file and then truncate the file. - -
Some Support: II -
The library could support partial memory management all - the time, or full memory management some of the time. - Orphaning free blocks instead of adding them to a free list - should not affect the file integrity, nor should fulfilling - new requests by extending the file instead of using the free - list. - -
Full Support: III -
The library supports space-efficient memory management by - always fulfilling allocation requests from the free list when - possible, and by coalescing adjacent free blocks into a - single larger free block. -
- -

Support in the File Format

- -
-
No Support: A -
The file format does not support memory management; any - unreferenced block in the file is assumed to be free. If - the library supports full memory management then it will - have to traverse the entire file to determine which blocks - are unreferenced. - -
Some Support: B -
Assuming that unreferenced blocks are free can be - dangerous in a situation where the file is not consistent. - For instance, if a directory tree becomes detached from the - main directory hierarchy, then the detached directory and - everything that is referenced only through the detached - directory become unreferenced. File repair utilities will - be unable to determine which unreferenced blocks need to be - linked back into the file hierarchy. - -
Therefore, it might be useful to keep an unsorted, - doubly-linked list of free blocks in the file. The library - can add and remove blocks from the list in constant time, - and can generate its own internal free-block data structure - in time proportional to the number of free blocks instead of - the size of the file. Additionally, a library can use a - subset of the free blocks, an alternative which is not - feasible if the file format doesn't support any form of - memory management. - -
Full Support: C -
The file format can mirror library data structures for - space-efficient memory management. The free blocks are - linked in unsorted, doubly-linked lists with one list per - free block size. The heads of the lists are pointed to by a - B-tree whose nodes are sorted by free block size. At the - same time, all free blocks are the leaf nodes of another - B-tree sorted by starting and ending address. When the - trees are used in combination we can deallocate and allocate - memory in O(log N) time where N is the - number of free blocks. -
- -

Combinations of Library and File Format Support

- -

We now evaluate each combination of library support with file - support: - -

-
I-A -
If neither the library nor the file support memory - management, then each allocation request will come from the - end of the file and each deallocation request is a no-op - that simply leaves the free block unreferenced. - -
    -
  • Advantages -
      -
    • No file overhead for allocation or deallocation. -
    • No library overhead for allocation or - deallocation. -
    • No file traversal required at time of open. -
    • No data needs to be written back to the file when - it's closed. -
    • Trivial to implement (already implemented). -
    - -
  • Disadvantages -
      -
    • Inefficient use of file space. -
    • A file repair utility must reclaim lost file space. -
    • Difficulties for file repair utilities. (Is an - unreferenced block a free block or orphaned data?) -
    -
- -
II-A -
In order for the library to support memory management, it - will be required to build the internal free block - representation by traversing the entire file looking for - unreferenced blocks. - -
    -
  • Advantages -
      -
    • No file overhead for allocation or deallocation. -
    • Variable amount of library overhead for allocation - and deallocation depending on how much work the - library wants to do. -
    • No data needs to be written back to the file when - it's closed. -
    • Might use file space efficiently. -
    -
  • Disadvantages -
      -
    • Might use file space inefficiently. -
    • File traversal required at time of open. -
    • A file repair utility must reclaim lost file space. -
    • Difficulties for file repair utilities. -
    • Sharing of the free list between processes falls - outside the HDF5 file format documentation. -
    -
- -
III-A -
In order for the library to support full memory - management, it will be required to build the internal free - block representation by traversing the entire file looking - for unreferenced blocks. - -
    -
  • Advantages -
      -
    • No file overhead for allocation or deallocation. -
    • Efficient use of file space. -
    • No data needs to be written back to the file when - it's closed. -
    -
  • Disadvantages -
      -
    • Moderate amount of library overhead for allocation - and deallocation. -
    • File traversal required at time of open. -
    • A file repair utility must reclaim lost file space. -
    • Difficulties for file repair utilities. -
    • Sharing of the free list between processes falls - outside the HDF5 file format documentation. -
    -
- -
I-B -
If the library doesn't support memory management but the - file format supports some level of management, then a file - repair utility will have to be run occasionally to reclaim - unreferenced blocks. - -
    -
  • Advantages -
      -
    • No file overhead for allocation or deallocation. -
    • No library overhead for allocation or - deallocation. -
    • No file traversal required at time of open. -
    • No data needs to be written back to the file when - it's closed. -
    -
  • Disadvantages -
      -
    • A file repair utility must reclaim lost file space. -
    • Difficulties for file repair utilities. -
    -
- -
II-B -
Both the library and the file format support some level - of memory management. - -
    -
  • Advantages -
      -
    • Constant file overhead per allocation or - deallocation. -
    • Variable library overhead per allocation or - deallocation depending on how much work the library - wants to do. -
    • Traversal at file open time is on the order of the - free list size instead of the file size. -
    • The library has the option of reading only part of - the free list. -
    • No data needs to be written at file close time if - it has been amortized into the cost of allocation - and deallocation. -
    • File repair utilties don't have to be run to - reclaim memory. -
    • File repair utilities can detect whether an - unreferenced block is a free block or orphaned data. -
    • Sharing of the free list between processes might - be easier. -
    • Possible efficient use of file space. -
    -
  • Disadvantages -
      -
    • Possible inefficient use of file space. -
    -
- -
III-B -
The library provides space-efficient memory management but - the file format only supports an unsorted list of free - blocks. - -
    -
  • Advantages -
      -
    • Constant time file overhead per allocation or - deallocation. -
    • No data needs to be written at file close time if - it has been amortized into the cost of allocation - and deallocation. -
    • File repair utilities don't have to be run to - reclaim memory. -
    • File repair utilities can detect whether an - unreferenced block is a free block or orphaned data. -
    • Sharing of the free list between processes might - be easier. -
    • Efficient use of file space. -
    -
  • Disadvantages -
      -
    • O(log N) library overhead per allocation or - deallocation where N is the total number of - free blocks. -
    • O(N) time to open a file since the entire - free list must be read to construct the in-core - trees used by the library. -
    • Library is more complicated. -
    -
- -
I-C -
This has the same advantages and disadvantages as I-A with - the added disadvantage that the file format is much more - complicated. -
II-C -
If the library only provides partial memory management but - the file requires full memory management, then this method - degenerates to the same as II-A with the added disadvantage - that the file format is much more complicated. - -
III-C -
The library and file format both provide complete data - structures for space-efficient memory management. - -
    -
  • Advantages -
      -
    • Files can be opened in constant time since the - free list is read on demand and amortised into the - allocation and deallocation requests. -
    • No data needs to be written back to the file when - it's closed. -
    • File repair utilities don't have to be run to - reclaim memory. -
    • File repair utilities can detect whether an - unreferenced block is a free block or orphaned data. -
    • Sharing the free list between processes is easy. -
    • Efficient use of file space. -
    -
  • Disadvantages -
      -
    • O(log N) file allocation and deallocation - cost where N is the total number of free - blocks. -
    • O(log N) library allocation and - deallocation cost. -
    • Much more complicated file format. -
    • More complicated library. -
    -
- -
- - -

The Algorithm for II-B

- -

The file contains an unsorted, doubly-linked list of free - blocks. The address of the head of the list appears in the - super block. Each free block contains the following fields: - -

- - - - - - - - - - - - - - - - - - - - - -
bytebytebytebyte
Free Block Signature
Total Free Block Size
Address of Left Sibling
Address of Right Sibling


Remainder of Free Block


-
- -
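For illustration only, the free block fields above might be viewed in C roughly as follows (a minimal sketch; the 4-byte signature and 8-byte field widths are assumptions made for this example, not a statement of the HDF5 file format):

/* Illustrative in-memory view of one free block record (hypothetical). */
#include <stdint.h>

typedef struct free_block_t {
    char     signature[4];   /* free block signature (assumed 4 bytes)    */
    uint64_t total_size;     /* total size of the free block, in bytes    */
    uint64_t left_sibling;   /* file address of the previous free block   */
    uint64_t right_sibling;  /* file address of the next free block       */
    /* the remainder of the block is simply unused file space */
} free_block_t;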

The library reads as much of the free list as is convenient, - whenever that is convenient, and pushes those entries onto stacks. This can - occur when a file is opened or any time during the life of the - file. There is one stack for each free block size and the - stacks are sorted by size in a balanced tree in memory. -

Deallocation involves finding the correct stack or creating - a new one (an O(log K) operation where K is - the number of stacks), pushing the free block info onto the - stack (a constant-time operation), and inserting the free - block into the file free block list (a constant-time operation - which doesn't necessarily involve any I/O since the free blocks - can be cached like other objects). No attempt is made to - coalesce adjacent free blocks into larger blocks. - -

Allocation involves finding the correct stack (an O(log - K) operation), removing the top item from the stack - (a constant-time operation), and removing the block from the - file free block list (a constant-time operation). If there is - no free block of the requested size or larger, then the file - is extended. - -

To provide shareability of the free list between processes, - the last step of an allocation will check for the free block - signature and if it doesn't find one will repeat the process. - Alternatively, a process can temporarily remove free blocks - from the file and hold them in its own private pool. -

To summarize... -

-
File opening -
O(N) amortized over the time the file is open, - where N is the number of free blocks. The library - can still function without reading any of the file free - block list. - -
Deallocation -
O(log K) where K is the number of unique - sizes of free blocks. File access is constant. - -
Allocation -
O(log K). File access is constant. - -
File closing -
O(1) even if the library temporarily removes free - blocks from the file to hold them in a private pool since - the pool can still be a linked list on disk. -
- - -
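The in-core bookkeeping described above for scheme II-B can be illustrated as follows. This is a minimal, self-contained sketch, not library code: it keeps the per-size stacks in a plain linked list (the design above calls for a balanced tree to get the O(log K) bound), it reuses only exact-size matches, and it omits the on-disk free-list updates.

#include <stdint.h>
#include <stdlib.h>

/* One stack of free blocks that all share the same size. */
typedef struct free_entry { uint64_t addr; struct free_entry *next; } free_entry;
typedef struct size_stack { uint64_t size; free_entry *top;
                            struct size_stack *next; } size_stack;

static size_stack *stacks = NULL;   /* the design stores these in a balanced tree */
static uint64_t    eof    = 0;      /* pretend end-of-file address (illustration) */

static size_stack *find_stack(uint64_t size, int create)
{
    size_stack *s;
    for (s = stacks; s; s = s->next)
        if (s->size == size)
            return s;
    if (!create)
        return NULL;
    s = calloc(1, sizeof *s);
    s->size = size;
    s->next = stacks;
    stacks  = s;
    return s;
}

/* Deallocation: find (or create) the stack for this size and push the block. */
void fl_free(uint64_t addr, uint64_t size)
{
    size_stack *s = find_stack(size, 1);
    free_entry *e = malloc(sizeof *e);
    e->addr = addr;
    e->next = s->top;
    s->top  = e;
    /* a real implementation would also link the block into the unsorted,
     * doubly-linked free block list stored in the file                    */
}

/* Allocation: pop a block of the requested size, otherwise extend the file. */
uint64_t fl_alloc(uint64_t size)
{
    size_stack *s = find_stack(size, 0);
    if (s && s->top) {
        free_entry *e    = s->top;
        uint64_t    addr = e->addr;
        s->top = e->next;
        free(e);
        return addr;                 /* reuse an existing free block       */
    }
    eof += size;                     /* no suitable block: extend the file */
    return eof - size;
}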

The Algorithm for III-C

- -

The HDF5 file format supports a general B-tree mechanism - for storing data with keys. If we use a B-tree to represent - all parts of the file that are free and the B-tree is indexed - so that a free file chunk can be found if we know the starting - or ending address, then we can efficiently determine whether a - free chunk begins or ends at the specified address. Call this - the Address B-Tree. - -

If a second B-tree points to a set of stacks where the - members of a particular stack are all free chunks of the same - size, and the tree is indexed by chunk size, then we can - efficiently find the best-fit chunk size for a memory request. - Call this the Size B-Tree. - -

All free blocks of a particular size can be linked together - with an unsorted, doubly-linked, circular list and the left - and right sibling addresses can be stored within the free - chunk, allowing us to remove or insert items from the list in - constant time. - -

Deallocation of a block of file memory consists of the following steps (a sketch follows the numbered list below): -

    -
  1. Add the new free block whose address is ADDR to the - address B-tree. - -
      -
    1. If the address B-tree contains an entry for a free - block that ends at ADDR-1 then remove that - block from the B-tree and from the linked list (if the - block was the first on the list then the size B-tree - must be updated). Adjust the size and address of the - block being freed to include the block just removed from - the free list. The time required to search for and - possibly remove the left block is O(log N) - where N is the number of free blocks. - -
    2. If the address B-tree contains an entry for the free - block that begins at ADDR+LENGTH then - remove that block from the B-tree and from the linked - list (if the block was the first on the list then the - size B-tree must be updated). Adjust the size of the - block being freed to include the block just removed from - the free list. The time required to search for and - possibly remove the right block is O(log N). - -
    3. Add the new (adjusted) block to the address B-tree. - The time for this operation is O(log N). -
    - -
  2. Add the new block to the size B-tree and linked list. - -
      -
    1. If the size B-tree has an entry for this particular - size, then add the chunk to the tail of the list. This - is an O(log K) operation where K is - the number of unique free block sizes. - -
    2. Otherwise make a new entry in the B-tree for chunks of - this size. This is also O(log K). -
    -
- -
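A minimal, self-contained sketch of the coalescing described in the steps above. It keeps the free extents in a single list sorted by address rather than in the address and size B-trees (so lookups here are O(N) instead of O(log N)), and it omits the size-index and on-disk updates; the names are illustrative only.

#include <stdint.h>
#include <stdlib.h>

/* One free extent, kept sorted by starting address. */
typedef struct extent { uint64_t addr, len; struct extent *next; } extent;
static extent *free_list = NULL;

/* Free the range [addr, addr+len): merge with touching neighbours, then insert. */
void fs_free(uint64_t addr, uint64_t len)
{
    extent *prev = NULL, *cur = free_list;

    /* walk to the first extent that starts at or after addr */
    while (cur && cur->addr < addr) {
        prev = cur;
        cur  = cur->next;
    }

    /* step 1.2: an extent beginning at addr+len is absorbed from the right */
    if (cur && cur->addr == addr + len) {
        extent *dead = cur;
        len += cur->len;
        cur  = cur->next;
        if (prev) prev->next = cur; else free_list = cur;
        free(dead);
    }

    /* step 1.1: an extent ending at addr-1 absorbs the new block from the left */
    if (prev && prev->addr + prev->len == addr) {
        prev->len += len;
        return;
    }

    /* steps 1.3 and 2: otherwise record the (possibly enlarged) extent */
    extent *e = malloc(sizeof *e);
    e->addr = addr;
    e->len  = len;
    e->next = cur;
    if (prev) prev->next = e; else free_list = e;
}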

Allocation is similar to deallocation. - -

To summarize... - -

-
File opening -
O(1) - -
Deallocation -
O(log N) where N is the total number of - free blocks. File access time is O(log N). - -
Allocation -
O(log N). File access time is O(log N). - -
File closing -
O(1). -
- - -
-
Robb Matzke
- - -Last modified: Thu Jul 31 14:41:01 EST - - - diff --git a/doc/html/MountingFiles.html b/doc/html/MountingFiles.html deleted file mode 100644 index 16f9115..0000000 --- a/doc/html/MountingFiles.html +++ /dev/null @@ -1,427 +0,0 @@ - - - Mounting Files - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

Mounting Files

- -

Purpose

- -

This document contrasts two methods for mounting an HDF5 file - on another HDF5 file: the case where the relationship between - files is a tree and the case where it's a graph. The tree case - simplifies current working group functions and allows symbolic - links to point into ancestor files, whereas the graph case is - more consistent with the organization of groups within a - particular file. -

Definitions

- -

If file child is mounted on file - parent at group /mnt in - parent then the contents of the root group of - child will appear in the group /mnt of - parent. The group /mnt is called the - mount point of the child in the parent. - -

Common Features

- -

These features are common to both mounting schemes. - -

    -
  • The previous contents of /mnt in - parent are temporarily hidden. If objects in that - group had names from other groups then the objects will still - be visible under those other names. -
  • The mount point is actually an OID (not a name) so if there - are other names besides /mnt for that group then - the root group of the child will be visible in all those - names. - -
  • At most one file can be mounted per mount point but a parent - can have any number of mounted children. - -
  • Name lookups will entail a search through the mount table at - each stage of the lookup. The search will be O(log - N) where N is the number of children mounted - on that file. - -
  • Files open for read-only can be mounted on other files that - are open for read-only. Mounting a file in no way changes the - contents of the file. - -
  • Mounting a child may hide mount points that exist below the - child's mount point, but it does not otherwise affect mounted - files. - -
  • Hard links cannot cross file boundaries. An object cannot - be moved or renamed with H5Gmove() in such a way - that the new location would be in a different file than the - original location. - -
  • The child can be accessed in a manner different from the - parent. For instance, a read-write child in a read-only - parent, a parallel child in a serial parent, etc. - -
  • If some object in the child is open and the child is - unmounted and/or closed, the object will remain open and - accessible until explicitly closed. As in the mountless case, - the underlying UNIX file will be held open until all member - objects are closed. - -
  • Current working groups that point into a child will remain - open and usable even after the child has been unmounted and/or - closed. - -
  • Datasets that share a committed datatype must reside in the - same file as the datatype. - -
- -

Contrasting Features

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TreeGraph
The set of mount-related files makes a tree.The set of mount-related files makes a directed - graph.
A file can be mounted at only one mount point.A file can be mounted at any number of mount points.
Symbolic links in the child that have a link value which - is an absolute name can be interpreted with respect to the - root group of either the child or the root of the mount - tree, a property which is determined when the child is - mounted.Symbolic links in the child that have a link value which - is an absolute name are interpreted with respect to the - root group of the child.
Closing a child causes it to be unmounted from the - parent.Closing a child has no effect on its relationship with - the parent. One can continue to access the child contents - through the parent.
Closing the parent recursively unmounts and closes all - mounted children.Closing the parent unmounts all children but - does not close them or unmount their children.
The current working group functions - H5Gset(), H5Gpush(), and - H5Gpop() operate on the root of the mount - tree.The current working group functions operate on the file - specified by their first argument.
Absolute name lookups (like for H5Dopen()) - are always performed with respect to the root of the mount - tree.Absolute name lookups are performed with respect to the - file specified by the first argument.
Relative name lookups (like for H5Dopen()) - are always performed with respect to the specified group - or the current working group of the root of the mount - tree.Relative name lookups are always performed with respect - to the specified group or the current working group of the - file specified by the first argument.
Mounting a child temporarily hides the current working - group stack for that childMounting a child has no effect on its current working - group stack.
Calling H5Fflush() will flush all files of - the mount tree regardless of which file is specified as - the argument.Calling H5Fflush() will flush only the - specified file.
-
- - -

Functions

- -
-
herr_t H5Fmount(hid_t loc, const char - *name, hid_t child, hid_t - plist) -
The file child is mounted at the specified location - in the parent. The loc and name specify the - mount point, a group in the parent. The plist - argument is an optional mount property list. The call will - fail if some file is already mounted on the specified group. - - - - - - - - - - - - - - - - - - -
TreeGraph
The call will fail if the child is already mounted - elsewhere.A child can be mounted at numerous mount points.
The call will fail if the child is an ancestor of the - parent.The mount graph is allowed to have cycles.
Subsequently closing the child will cause it to be - unmounted from the parent.Closing the child has no effect on its mount - relationship with the parent.
- -

-
herr_t H5Funmount(hid_t loc, const char - *name) -
Any file mounted at the group specified by loc and - name is unmounted. The child is not closed. This - function fails if no child is mounted at the specified point. - -

-
hid_t H5Pcreate(H5P_MOUNT) -
Creates and returns a new mount property list initialized - with default values. - -

-
herr_t H5Pset_symlink_locality(hid_t plist, - H5G_symlink_t locality) -
herr_t H5Pget_symlink_locality(hid_t plist, - H5G_symlink_t *locality) -
These functions exist only for the tree scheme. They set or - query the property that determines whether symbolic links with - absolute name value in the child are looked up with respect to - the child or to the mount root. The possible values are - H5G_SYMLINK_LOCAL or - H5G_SYMLINK_GLOBAL (the default). - -

-
hid_t H5Freopen(hid_t file) -
A file handle is reopened, creating an additional file - handle. The new file handle refers to the same file but has an - empty current working group stack. - - - - - - - - - - -
TreeGraph
The new handle is not mounted but the old handle - continues to be mounted.The new handle is mounted at the same location(s) as - the original handle.
-
- -

Example

- -

A file eos.h5 contains data which is constant for - all problems. The output of a particular physics application is - dumped into data1.h5 and data2.h5 and - the physics expects various constants from eos.h5 - in the eos group of the two data files. Instead of - copying the contents of eos.h5 into every physics - output file we simply mount eos.h5 as a read-only - child of data1.h5 and data2.h5. - -

- - - -

Tree

-/* Create data1.h5 */
-data1 = H5Fcreate("data1.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-H5Gclose(H5Gcreate(data1, "/eos", 0));
-H5Gset_comment(data1, "/eos", "EOS mount point");
-
-/* Create data2.h5 */
-data2 = H5Fcreate("data2.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-H5Gclose(H5Gcreate(data2, "/eos", 0));
-H5Gset_comment(data2, "/eos", "EOS mount point");
-
-/* Open eos.h5 and mount it in both files */
-eos1 = H5Fopen("eos.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
-H5Fmount(data1, "/eos", eos1, H5P_DEFAULT);
-eos2 = H5Freopen(eos1);
-H5Fmount(data2, "/eos", eos2, H5P_DEFAULT);
-
-    ... physics output ...
-
-H5Fclose(data1);
-H5Fclose(data2);
-	      

Graph

-/* Create data1.h5 */
-data1 = H5Fcreate("data1.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-H5Gclose(H5Gcreate(data1, "/eos", 0));
-H5Gset_comment(data1, "/eos", "EOS mount point");
-
-/* Create data2.h5 */
-data2 = H5Fcreate("data2.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-H5Gclose(H5Gcreate(data2, "/eos", 0));
-H5Gset_comment(data2, "/eos", "EOS mount point");
-
-/* Open eos.h5 and mount it in both files */
-eos = H5Fopen("eos.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
-H5Fmount(data1, "/eos", eos, H5P_DEFAULT);
-H5Fmount(data2, "/eos", eos, H5P_DEFAULT);
-H5Fclose(eos);
-
-    ... physics output ...
-
-H5Fclose(data1);
-H5Fclose(data2);
-	      
-
- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 14 October 1999 - - - - diff --git a/doc/html/NCSAfooterlogo.gif b/doc/html/NCSAfooterlogo.gif deleted file mode 100644 index b6b1cff..0000000 Binary files a/doc/html/NCSAfooterlogo.gif and /dev/null differ diff --git a/doc/html/ObjectHeader.txt b/doc/html/ObjectHeader.txt deleted file mode 100644 index d769377..0000000 --- a/doc/html/ObjectHeader.txt +++ /dev/null @@ -1,60 +0,0 @@ -OBJECT HEADERS --------------- - -haddr_t -H5O_new (hdf5_file_t *f, intn nrefs, size_t size_hint) - - Creates a new empty object header and returns its address. - The SIZE_HINT is the initial size of the data portion of the - object header and NREFS is the number of symbol table entries - that reference this object header (normally one). - - If SIZE_HINT is too small, then at least some default amount - of space is allocated for the object header. - -intn /*num remaining links */ -H5O_link (hdf5_file_t *f, /*file containing header */ - haddr_t addr, /*header file address */ - intn adjust) /*link adjustment amount */ - - -size_t -H5O_sizeof (hdf5_file_t *f, /*file containing header */ - haddr_t addr, /*header file address */ - H5O_class_t *type, /*message type or H5O_ANY */ - intn sequence) /*sequence number, usually zero */ - - Returns the size of a particular instance of a message in an - object header. When an object header has more than one - instance of a particular message type, then SEQUENCE indicates - which instance to return. - -void * -H5O_read (hdf5_file_t *f, /*file containing header */ - haddr_t addr, /*header file address */ - H5G_entry_t *ent, /*optional symbol table entry */ - H5O_class_t *type, /*message type or H5O_ANY */ - intn sequence, /*sequence number, usually zero */ - size_t size, /*size of output message */ - void *mesg) /*output buffer */ - - Reads a message from the object header into memory. - -const void * -H5O_peek (hdf5_file_t *f, /*file containing header */ - haddr_t addr, /*header file address */ - H5G_entry_t *ent, /*optional symbol table entry */ - H5O_class_t *type, /*type of message or H5O_ANY */ - intn sequence) /*sequence number, usually zero */ - -haddr_t /*new heap address */ -H5O_modify (hdf5_file_t *f, /*file containing header */ - haddr_t addr, /*header file address */ - H5G_entry_t *ent, /*optional symbol table entry */ - hbool_t *ent_modified, /*entry modification flag */ - H5O_class_t *type, /*message type */ - intn overwrite, /*sequence number or -1 */ - void *mesg) /*the message */ - - - diff --git a/doc/html/PSandPDF/Makefile.am b/doc/html/PSandPDF/Makefile.am deleted file mode 100644 index 0d85ec4..0000000 --- a/doc/html/PSandPDF/Makefile.am +++ /dev/null @@ -1,16 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir=$(docdirc)/hdf5/PSandPDF - -# This makefile does nothing; there are no files to install. diff --git a/doc/html/PSandPDF/Makefile.in b/doc/html/PSandPDF/Makefile.in deleted file mode 100644 index dcf57ca..0000000 --- a/doc/html/PSandPDF/Makefile.in +++ /dev/null @@ -1,453 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. 
-# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/PSandPDF -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. 
-MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdirc)/hdf5/PSandPDF -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/PSandPDF/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/PSandPDF/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-man install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic \ - mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ - uninstall-info-am - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall - -# This makefile does nothing; there are no files to install. -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/PSandPDF/process.txt b/doc/html/PSandPDF/process.txt deleted file mode 100644 index 92fe2e6..0000000 --- a/doc/html/PSandPDF/process.txt +++ /dev/null @@ -1,218 +0,0 @@ - -============================================================================== -Process for Creating PDF Versions of the HDF5 RM and UG from HTML Source Files -============================================================================== - - Last modified: 24 Sep 2004 - -PDF versions of the HDF5 Reference Manual (RM) and the new HDF5 User's Guide -(UG) are created from the HTML source at each release of the HDF5 Library. -The process is managed through two application environments, Macromedia -DreamWeaver (an HTML editing environment) and HTML doc (a conversion tool). -This document describes the process in detail for the RM; the process for the -UG is very similar and will be described in a subsequent update. - - -The HDF5 Reference Manual -------------------------- -1. Using Macromedia Dreamweaver, define a site that contains all the - documents within doc/html, including html, image, DreamWeaver library and - .book files. - -2. In the ed_libs directory: - - a) Assuming you have your libraries set up for electronic viewing (to - verify, check that there is nothing more than a commented note and - an ' ' in the 3 aforementioned _Null libraries) drop the code - from the actual library into it's respective Null lib, replacing - the  . - (EXAMPLE: Take all the code from Footer.lbi and insert it into - Footer_Null.lbi, taking care not to remove the commented note.) - - b) In the now empty library file, add an  . - - c) Save the libraries and thier respective Null files. 
- - d) With a library still open (not a Null one) choose - Modify>Library>Update Pages... - Look in: Entire Site - Check 'Library Items' in the 'Update:' selection. - Hit start. - -3. In RM_H5P.html: - - a) RM_H5P.html contains two versions of the 'C Interfaces' function - list. One is for print use and one is for electronic use. - One should be visible and one should be commented out. - Make sure that the section labelled with - is commented out. - (by removing the >s). - - -4. Using HTMLdoc: - - a) Open PDF_RM_body.book - - b) The Input tab: - Document Type: Web Page - Input Files: - RM_H5Front.html - RM_H5.html - RM_H5A.html - RM_H5D.html - RM_H5E.html - RM_H5F.html - RM_H5G.html - RM_H5I.html - RM_H5P.html - RM_H5R.html - RM_H5S.html - RM_H5T.html - RM_H5Z.html - Tools.html - PredefDTypes.html - Glossary.html - Logo Image: none - Title File/Image: none - - c) The Output tab: - Output To: File - Output Path: H5_RM_body.pdf - Output Format: PDF - Output Options: JPEG Big Images - Compression (not critical, change at will): Slider right above 'Fast' - JPEG Quality (not critical, change at will): 60 - - d) The Page tab: - Page Size: Letter, 2-Sided - Top: 0.50in, Left: 1.00in, Right: 0.50in, Bottom: 0.50in - Header: Blank, Blank, Blank - Footer: Blank, Blank, 1,2,3,... - Number Up: 1 - - e) The Colors tab: - Everything Blank - Link Style: Plain - - f) The Fonts tab: - Base Font Size: 11.0 - Line Spacing: 1.2 - Body Typeface: Times - Heading Typeface: Helvetica - Header/Footer Size: 10.0 - Header/Footer Font: Helvetica - Character Set: iso-8859-1 - Options: Do Not Check 'Embed Fonts' - - g) The PDF tab: - PDF Version: 1.3 - Page Mode: Document - Page Layout: Single - Page Effect: None - Options: Check 'Include Links' - - h) The Security tab: - Encryption: No - - i) The Options tab: - HTML Editor: Point to Dreamweaver (recommended) - Browser Width: 680 - GUI Option: Check all 3 - - j) Save the book file. - - k) Verify that the output file (H5_RM_body.pdf) is not open. - - l) Generate the document. - -5. Open H5_RM_body.pdf - - a) Scan through the document page by page to verify that there are no - elements that should have been eliminated via javascripts or swapped - library contents. - - b) Scan through again looking only for places where page breaks need to - be added, removed, or relocated. I have found that re-generating the - document after every pagination correction actually saves time in the - long run. This is because some pagination corrections that must be - made may create other problems further down in the document. - Re-generating the document ensures that every problem you focus on - won't have to be re-done. - - c) When you are sure all the pagination is correct (don't forget to - start each of the sections included in the Table of Contents on a - recto) note page numbers for the beginning of each section in the TOC. - -6. Open RM_TOC.html - - a) Edit page numbers accordingly. - (Note: Assuming the general format of RM_H5Front remains the same, - the 'Overview' listing in the TOC should always be on pg. 1, and - the 'Fortran90 and C++ APIs' listing should be on pg. 2, an - exepction to the recto-rule.) - - b) Save. - -7. 
Using HTMLdoc: - - a) Open PDF_RM_front.book - - b) The Input tab: - Document Type: Web Page - Input Files: - RM_Title.html - Copyright.html - RM_TOC.html - Logo Image: none - Title File/Image: none - - c) The Output tab: - Output To: File - Output Path: H5_RM_front.pdf - Output Format: PDF - Output Options: JPEG Big Images - Compression (not critical, change at will): Slider right above 'Fast' - JPEG Quality (not critical, change at will): 60 - - d) The Page tab: - Page Size: Letter, 2-Sided - Top: 0.50in, Left: 1.00in, Right: 0.50in, Bottom: 0.50in - Header: Blank, Blank, Blank - Footer: Blank, Blank, Blank - Number Up: 1 - - e) The Colors tab: - Everything Blank - Link Style: Plain - - f) The Fonts tab: - Base Font Size: 11.0 - Line Spacing: 1.2 - Body Typeface: Times - Heading Typeface: Helvetica - Header/Footer Size: 10.0 - Header/Footer Font: Helvetica - Character Set: iso-8859-1 - Options: Do Not Check 'Embed Fonts' - - g) The PDF tab: - PDF Version: 1.3 - Page Mode: Document - Page Layout: Single - Page Effect: None - Options: Check 'Include Links' - - h) The Security tab: - Encryption: No - - i) The Options tab: - HTML Editor: Point to Dreamweaver (recommended) - Browser Width: 680 - GUI Options: Check all 3 - - j) Save the book file. - - k) Verify that the output file (H5_RM_front.pdf) is not open. - - l) Generate the document. - diff --git a/doc/html/Performance.html b/doc/html/Performance.html deleted file mode 100644 index 25db8f4..0000000 --- a/doc/html/Performance.html +++ /dev/null @@ -1,260 +0,0 @@ - - - - Performance - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

Performance Analysis and Issues

- -

1. Introduction

- -

This section includes brief discussions of performance issues - in HDF5 and performance analysis tools for HDF5 or pointers to - such discussions. - -

2. Dataset Chunking

- - Appropriate dataset chunking can make a significant difference - in HDF5 performance. This topic is discussed in - Dataset Chunking Issues elsewhere - in this User's Guide. - - -

3. Freespace Management

-
- -

HDF5 does not yet manage freespace as effectively as it might. - While a file is opened, the library actively tracks and re-uses - freespace, i.e., space that is freed (or released) - during the run. - But the library does not yet manage freespace across the - closing and reopening of a file; when a file is closed, - all knowledge of available freespace is lost. - What was freespace becomes an unusable hole in the file. - -

There are several circumstances that can result in freespace - in an HDF5 file: -

    -
  • Reading then rewriting a dataset or compressed dataset - chunk.1 -
      -
    • If the rewritten dataset or compressed chunk is the same - size as or smaller than the original, it will be written - to the same file location. -
    • If, however, the dataset or compressed chunk is larger - than the original, it will be written contiguously elsewhere - in the file, leaving freespace at the original location. -
    • If the rewritten dataset or compressed chunk is - substantially smaller than the original, the remaining - space will be released and identified as freespace. -
    -
  • Deleting (or unlinking) a dataset or group. -
      -
    • If an object, such as a dataset, group, or named datatype, - is deleted (normally with H5Gunlink), - the space previously occupied by the object is released - and identified as freespace. -
    -
- -

As stated above, freespace is not managed across the - closing and reopening of an HDF5 file; file space that was - known freespace while the file remained open becomes an - inaccessible hole when the file is closed. - Thus, if a file is often closed and reopened, datasets - frequently rewritten, or groups and/or datasets frequently - added and deleted, that file can develop large numbers of - holes and grow unnecessarily large. This can, in turn, - seriously impair application or library performance - as the file ages. - -

An h5pack utility would enable packing - a file to remove the holes, but writing such a utility to - universally pack the file correctly is a complex task and the - HDF5 development team has not to date had the resources to - complete the task. - -

For application developers or researchers who find themselves - working with files that become bloated in this manner, there - are, at this time, two remedies: -

    -
  • H5view, an HDF5 Java tool, allows the user - to open a file and, using the Save As... feature, - save the file under a new filename. The new file can then - be closed and will be a packed version of the original file. - This approach is reasonably reliable, but with two caveats: -
      -
    • It is not automated. -
    • This ability is a side-effect of the tool's design; - it was not designed for this purpose and this approach - to file packing has not been exhaustively tested. -
    -
  • An application developer or researcher can write a utility - that is tuned to their data and file structures. This - utility can then read in a file, copy the structures and - datasets to a new file, and write the new file to storage - (see the sketch following this list). - This will eliminate the holes, making the new file a - fully-packed version of the original file. -
- - -
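A minimal sketch of such a copy step, using the 1.6-era C API. The file and dataset names are placeholders, the dataset is assumed to live in the root group, and error checking, groups, and attributes are omitted; a real utility would walk the whole file.

#include <stdlib.h>
#include "hdf5.h"

/* Copy one dataset from a bloated file into a freshly created (hole-free) file. */
int copy_dataset(const char *src_name, const char *dst_name, const char *dset_name)
{
    hid_t src   = H5Fopen(src_name, H5F_ACC_RDONLY, H5P_DEFAULT);
    hid_t dst   = H5Fcreate(dst_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t dset  = H5Dopen(src, dset_name);
    hid_t type  = H5Dget_type(dset);
    hid_t space = H5Dget_space(dset);

    /* read the raw data into memory using the file datatype */
    hssize_t npoints = H5Sget_simple_extent_npoints(space);
    void    *buf     = malloc((size_t)npoints * H5Tget_size(type));
    H5Dread(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

    /* recreate the dataset in the new file and write the data back out */
    hid_t copy = H5Dcreate(dst, dset_name, type, space, H5P_DEFAULT);
    H5Dwrite(copy, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

    free(buf);
    H5Dclose(copy);
    H5Sclose(space);
    H5Tclose(type);
    H5Dclose(dset);
    H5Fclose(dst);
    H5Fclose(src);
    return 0;
}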

- 1 - - This is a problem only with compressed chunks. - The compression ratio of data is highly dependent on the data - itself; regardless of whether the size of the data - changes, the size of the compressed data can change substantially - as the data changes. Uncompressed chunks do not vary in size, - so this issue does not arise. - - -

4. Use of the Pablo Instrumentation of HDF5

- - Pablo HDF5 Trace software provides a means of measuring the - performance of programs using HDF5. - -

The Pablo software consists - of an instrumented copy of the HDF5 library, the Pablo Trace and - Trace Extensions libraries, and some utilities for processing the - output. The instrumented version of the HDF5 library has hooks - inserted into the HDF5 code which call routines in the Pablo Trace - library just after entry to each instrumented HDF5 routine and - just prior to exit from the routine. The Pablo Trace Extension - library has programs that track the I/O activity between the - entry and exit of the HDF5 routine during execution. - -

A few lines of code must be inserted in the user's main program - to enable tracing and to specify which HDF5 procedures are to be - traced. The program is linked with the special HDF5 and Pablo - libraries to produce an executable. Running this executable on - a single processor produces an output file, called the trace file, - which contains Pablo Self-Defining Data Format - (SDDF) records that can later be analyzed using the - HDF5 Analysis Utilities. The HDF5 Analysis Utilities can be used - to interpret the SDDF records in the trace files to produce a - report describing the HDF5 I/O activity that occurred during - execution. -

For further instructions, see the file READ_ME - in the $(toplevel)/hdf5/pablo/ subdirectory of - the HDF5 source code distribution. - -

For further information about Pablo and the - Self-Defining Data Format, visit the Pablo website at - http://www-pablo.cs.uiuc.edu/.

- - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 2 August 2001 - - - - diff --git a/doc/html/PredefDTypes.html b/doc/html/PredefDTypes.html deleted file mode 100644 index 0f72414..0000000 --- a/doc/html/PredefDTypes.html +++ /dev/null @@ -1,516 +0,0 @@ - - -HDF5/Predefined Datatypes - - - - - - - - - - - - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-

HDF5 Predefined Datatypes

-
- - -The following datatypes are predefined in HDF5. - - -

IEEE floating point datatypes

-
    -
  • 32-bit and 64-bit -
  • Big-endian and little-endian -
- -
-        H5T_IEEE_F32BE
-        H5T_IEEE_F32LE
-        H5T_IEEE_F64BE
-        H5T_IEEE_F64LE
-
- - -

Standard datatypes

-
    -
  • Signed integer (2's complement), unsigned integer, and bitfield -
  • 8-bit, 16-bit, 32-bit, and 64-bit -
  • Big-endian and little-endian -
- - - - - -
-
-        H5T_STD_I8BE
-        H5T_STD_I8LE
-        H5T_STD_I16BE
-        H5T_STD_I16LE
-        H5T_STD_I32BE
-        H5T_STD_I32LE
-        H5T_STD_I64BE
-        H5T_STD_I64LE
-
-
-
-        H5T_STD_U8BE 
-        H5T_STD_U8LE
-        H5T_STD_U16BE
-        H5T_STD_U16LE
-        H5T_STD_U32BE
-        H5T_STD_U32LE
-        H5T_STD_U64BE 
-        H5T_STD_U64LE
-
-
-
-        H5T_STD_B8BE
-        H5T_STD_B8LE
-        H5T_STD_B16BE 
-        H5T_STD_B16LE
-        H5T_STD_B32BE
-        H5T_STD_B32LE
-        H5T_STD_B64BE 
-        H5T_STD_B64LE
-
-
- -
    -
  • Object reference or dataset region reference -
- -
-        H5T_STD_REF_OBJ
-        H5T_STD_REF_DSETREG 
-
- - -

UNIX-specific datatypes

-
    -
  • 32-bit and 64-bit -
  • Big-endian and little-endian -
- -
-        H5T_UNIX_D32BE
-        H5T_UNIX_D32LE
-        H5T_UNIX_D64BE
-        H5T_UNIX_D64LE
-
- - -

C-specific datatype

-
    -
  • String datatype in C (size defined in bytes rather than in bits) -
- -
-        H5T_C_S1
-
- - -

FORTRAN-specific datatype

-
    -
  • String datatype in FORTRAN (as defined for the HDF5 C library) -
- -
-        H5T_FORTRAN_S1
-
- - -

Intel-specific datatypes

-
    -
  • For Intel CPUs -
  • Little-endian -
  • Signed integer (2's complement), unsigned integer, bitfield, and - IEEE floating point -
  • 8-bit, 16-bit, 32-bit, and 64-bit -
- - - - -
-
-        H5T_INTEL_I8
-        H5T_INTEL_I16
-        H5T_INTEL_I32
-        H5T_INTEL_I64
-
-        H5T_INTEL_U8
-        H5T_INTEL_U16
-        H5T_INTEL_U32
-        H5T_INTEL_U64
-
-
-
-        H5T_INTEL_B8
-        H5T_INTEL_B16
-        H5T_INTEL_B32
-        H5T_INTEL_B64
-
-        H5T_INTEL_F32
-        H5T_INTEL_F64
-
-
- - -

DEC Alpha-specific datatypes

-
    -
  • For DEC Alpha CPUs -
  • Little-endian -
  • Signed integer (2's complement), unsigned integer, bitfield, and - IEEE floating point -
  • 8-bit, 16-bit, 32-bit, and 64-bit -
- - - - -
-
-        H5T_ALPHA_I8
-        H5T_ALPHA_I16
-        H5T_ALPHA_I32
-        H5T_ALPHA_I64
-
-        H5T_ALPHA_U8
-        H5T_ALPHA_U16
-        H5T_ALPHA_U32
-        H5T_ALPHA_U64
-
-
-
-        H5T_ALPHA_B8
-        H5T_ALPHA_B16
-        H5T_ALPHA_B32
-        H5T_ALPHA_B64
-
-        H5T_ALPHA_F32
-        H5T_ALPHA_F64
-
-
- - -

MIPS-specific datatypes

-
    -
  • For MIPS CPUs, commonly used in SGI systems -
  • Big-endian -
  • Signed integer (2's complement), unsigned integer, bitfield, and - IEEE floating point -
  • 8-bit, 16-bit, 32-bit, and 64-bit -
- - - - -
-
-        H5T_MIPS_I8
-        H5T_MIPS_I16
-        H5T_MIPS_I32
-        H5T_MIPS_I64
-
-        H5T_MIPS_U8
-        H5T_MIPS_U16
-        H5T_MIPS_U32
-        H5T_MIPS_U64
-
-
-
-        H5T_MIPS_B8
-        H5T_MIPS_B16
-        H5T_MIPS_B32
-        H5T_MIPS_B64
-
-        H5T_MIPS_F32
-        H5T_MIPS_F64
-
-
- - -

Predefined native datatypes

- - These are the datatypes detected by H5detect. - Their names differ from other HDF5 datatype names as follows: -
    -
  • Instead of a class name, precision, and byte order as the last - component, they have a C-like datatype name. -
  • If the datatype begins with U, then it is the unsigned - version of the integer datatype; other integer datatypes are signed. -
  • The datatype LLONG corresponds to C's long long and LDOUBLE corresponds to long double. These datatypes might be the same as LONG and DOUBLE, respectively. -
-
- - - - -
-
-        H5T_NATIVE_CHAR
-        H5T_NATIVE_SCHAR        
-        H5T_NATIVE_UCHAR        
-
-        H5T_NATIVE_SHORT        
-        H5T_NATIVE_USHORT       
-
-        H5T_NATIVE_INT          
-        H5T_NATIVE_UINT         
-
-        H5T_NATIVE_LONG         
-        H5T_NATIVE_ULONG        
-        H5T_NATIVE_LLONG        
-        H5T_NATIVE_ULLONG       
-
-
-
-        H5T_NATIVE_FLOAT        
-        H5T_NATIVE_DOUBLE       
-        H5T_NATIVE_LDOUBLE
-
-        H5T_NATIVE_B8
-        H5T_NATIVE_B16
-        H5T_NATIVE_B32
-        H5T_NATIVE_B64
-
-        H5T_NATIVE_OPAQUE       
-        H5T_NATIVE_HADDR
-        H5T_NATIVE_HSIZE
-        H5T_NATIVE_HSSIZE
-        H5T_NATIVE_HERR
-        H5T_NATIVE_HBOOL
-
-
- - -
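A minimal illustrative sketch (not part of the original page) of how predefined datatypes are typically used, following the C signatures documented elsewhere in this reference manual; the file name "example.h5" and dataset name "/dset" are hypothetical. The file datatype passed to H5Dcreate and the memory datatype passed to H5Dwrite may differ; the library converts between them automatically.

        #include "hdf5.h"

        void write_doubles_example(void)
        {
            hsize_t dims[1] = {100};
            double  data[100] = {0.0};
            hid_t   file, space, dset;

            file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
            space = H5Screate_simple(1, dims, NULL);

            /* The file stores little-endian IEEE 64-bit floats ...            */
            dset = H5Dcreate(file, "/dset", H5T_IEEE_F64LE, space, H5P_DEFAULT);

            /* ... while the memory buffer is described by the native C double
             * type; the library converts between the two as needed.           */
            H5Dwrite(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

            H5Dclose(dset);
            H5Sclose(space);
            H5Fclose(file);
        }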

ANSI C9x-specific native integer datatypes

-
    -
  • Signed integer (2's complement), unsigned integer, and bitfield -
  • 8-bit, 16-bit, 32-bit, and 64-bit -
  • LEAST -- uses the least amount of storage providing the specified precision -
    - FAST -- uses the storage that maximizes performance for the specified precision -
- - - - -
-
-        H5T_NATIVE_INT8
-        H5T_NATIVE_UINT8
-        H5T_NATIVE_INT_LEAST8
-        H5T_NATIVE_UINT_LEAST8
-        H5T_NATIVE_INT_FAST8 
-        H5T_NATIVE_UINT_FAST8
-
-        H5T_NATIVE_INT16
-        H5T_NATIVE_UINT16
-        H5T_NATIVE_INT_LEAST16
-        H5T_NATIVE_UINT_LEAST16
-        H5T_NATIVE_INT_FAST16
-        H5T_NATIVE_UINT_FAST16
-
-
-
-        H5T_NATIVE_INT32
-        H5T_NATIVE_UINT32
-        H5T_NATIVE_INT_LEAST32
-        H5T_NATIVE_UINT_LEAST32
-        H5T_NATIVE_INT_FAST32
-        H5T_NATIVE_UINT_FAST32
-
-        H5T_NATIVE_INT64
-        H5T_NATIVE_UINT64
-        H5T_NATIVE_INT_LEAST64
-        H5T_NATIVE_UINT_LEAST64 
-        H5T_NATIVE_INT_FAST64
-        H5T_NATIVE_UINT_FAST64
-
-
- - - -

FORTRAN90 API datatypes

-
-
    -
  • Datatypes defined for the FORTRAN90 APIs -
    -
    -
  • Native integer, single-precision real, double-precision real, - and character -
- -
-        H5T_NATIVE_INTEGER
-        H5T_NATIVE_REAL
-        H5T_NATIVE_DOUBLE
-        H5T_NATIVE_CHARACTER 
-
- -
    -
  • Signed integer (2's complement), unsigned integer, and - IEEE floating point -
  • 8-bit, 16-bit, 32-bit, and 64-bit -
  • Big-endian and little-endian -
- - - - - -
-
-        H5T_STD_I8BE
-        H5T_STD_I8LE
-        H5T_STD_I16BE
-        H5T_STD_I16LE
-        H5T_STD_I32BE
-        H5T_STD_I32LE
-        H5T_STD_I64BE
-        H5T_STD_I64LE
-
-
-
-        H5T_STD_U8BE
-        H5T_STD_U8LE
-        H5T_STD_U16BE
-        H5T_STD_U16LE
-        H5T_STD_U32BE
-        H5T_STD_U32LE
-        H5T_STD_U64BE
-        H5T_STD_U64LE
-
-
-
-        H5T_IEEE_F32BE
-        H5T_IEEE_F32LE
-        H5T_IEEE_F64BE
-        H5T_IEEE_F64LE
-
-
- - -
    -
  • Object reference or dataset region reference -
- -
-        H5T_STD_REF_OBJ
-        H5T_STD_REF_DSETREG
-
- - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - diff --git a/doc/html/Properties.html b/doc/html/Properties.html deleted file mode 100644 index c13a269..0000000 --- a/doc/html/Properties.html +++ /dev/null @@ -1,185 +0,0 @@ - - - - Property List Interface (H5P) - - - - - - - - - - -
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-

The Property List Interface (H5P)

- -

1. Introduction

- -

The property list (a.k.a., template) interface provides a - mechanism for default named arguments for a C function - interface. A property list is a collection of name/value pairs - which can be passed to various other HDF5 functions to control - features that are typically unimportant or whose default values - are usually used. - -

For instance, file creation needs to know various things such - as the size of the user-block at the beginning of the file, or - the size of various file data structures. Wrapping this - information in a property list simplifies the API by reducing - the number of arguments to H5Fcreate(). - -

2. General Property List Operations

- -

Property lists follow the same create/open/close paradigm as - the rest of the library. - -

-
hid_t H5Pcreate (H5P_class_t class) -
A new property list can be created as an instance of some - property list class. The new property list is initialized - with default values for the specified class. The classes are: - -

-
-
H5P_FILE_CREATE -
Properties for file creation. See H5F - for details about the file creation properties. -
H5P_FILE_ACCESS -
Properties for file access. See H5F - for details about the file access properties. -
H5P_DATASET_CREATE -
Properties for dataset creation. See - H5D for details about dataset - creation properties. -
H5P_DATASET_XFER -
Properties for raw data transfer. See - H5D for details about raw data - transfer properties. -
- -

-
hid_t H5Pcopy (hid_t plist) -
A property list can be copied to create a new property - list. The new property list has the same properties and values - as the original property list. - -

-
herr_t H5Pclose (hid_t plist) -
All property lists should be closed when the application is - finished accessing them. This frees resources used by the - property list. - -

-
H5P_class_t H5Pget_class (hid_t plist) -
The class of which the property list is a member can be - obtained by calling this function. The property list classes - are defined above for H5Pcreate(). -
- - -
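A minimal illustrative sketch (not part of the original page) of the create/copy/close paradigm, following the signatures documented above. It wraps the user-block size mentioned earlier in a file creation property list before calling H5Fcreate; the file name "example.h5" and the 512-byte user block are hypothetical choices.

        #include "hdf5.h"

        void create_file_with_userblock(void)
        {
            hid_t       fcpl, fcpl_copy, file;
            H5P_class_t cls;

            fcpl = H5Pcreate(H5P_FILE_CREATE);     /* defaults for this class  */
            H5Pset_userblock(fcpl, 512);           /* 512-byte user block      */

            fcpl_copy = H5Pcopy(fcpl);             /* independent copy         */
            cls = H5Pget_class(fcpl_copy);         /* H5P_FILE_CREATE          */
            (void)cls;

            file = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);

            H5Pclose(fcpl_copy);                   /* close lists when done    */
            H5Pclose(fcpl);
            H5Fclose(file);
        }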
-
- - - -
- HDF5 documents and links 
- Introduction to HDF5 
- HDF5 Reference Manual 
- HDF5 User's Guide for Release 1.6 
- -
- And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
- Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
- References   - Attributes   - Property Lists   - Error Handling   -
- Filters   - Caching   - Chunking   - Mounting Files   -
- Performance   - Debugging   - Environment   - DDL   -
-
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.4.5, February 2003 -
- -Last modified: 13 December 1999 - - - - diff --git a/doc/html/RM_H5.html b/doc/html/RM_H5.html deleted file mode 100644 index cafc31a..0000000 --- a/doc/html/RM_H5.html +++ /dev/null @@ -1,650 +0,0 @@ - - -HDF5/H5 API Specification - - - - - - - - - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-

H5: General Library Functions

-
- -These functions serve general-purpose needs of the HDF5 library -and it users. - -

-The C Interfaces: - - - -
- -       - -       - -
-
- -Alphabetical Listing - - - - - - - - - - - - - - -
- -        - -        - -
- -
-The FORTRAN90 Interfaces: -
-In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
- - - -
- -       - -       - -
- - - - - -


-
-
Name: H5check_version -
Signature: -
herr_t H5check_version(unsigned majnum, - unsigned minnum, - unsigned relnum - ) -
Purpose: -
Verifies that library versions are consistent. -
Description: -
H5check_version verifies that the arguments provided - with the function call match the version numbers compiled into - the library. -

- H5check_version serves two slightly differing purposes. -

- First, the function is intended to be called by the user to verify - that the version of the header files compiled into an application - matches the version of the HDF5 library being used. - One may look at the H5check definition in the file - H5public.h as an example. -

- Due to the risks of data corruption or segmentation faults, - H5check_version causes the application to abort if the - version numbers do not match. - The abort is achieved by means of a call to the - standard C function abort(). -

- Note that H5check_version verifies only the - major and minor version numbers and the release number; - it does not verify the sub-release value as that should be - an empty string for any official release. - This means that any two incompatible library versions must - have different {major,minor,release} numbers. (Notice the - reverse is not necessarily true.) -

- Secondarily, H5check_version verifies that the - library version identifiers H5_VERS_MAJOR, - H5_VERS_MINOR, H5_VERS_RELEASE, - H5_VERS_SUBRELEASE, and H5_VERS_INFO - are consistent. - This is designed to catch source code inconsistencies, - but does not generate the fatal error as in the first stage - because this inconsistency does not cause errors in the data files. - If this check reveals inconsistencies, the library issues a warning - but the function does not fail. - -

Parameters: -
    - - - - - - - - - -
    unsigned majnumIN: The major version of the library.
    unsigned minnum    IN: The minor version of the library.
    unsigned relnumIN: The release number of the library.
-
Returns: -
Returns a non-negative value if successful. - Upon failure, this function causes the application to abort. -
Fortran90 Interface: h5check_version_f -
-
-SUBROUTINE h5check_version_f(majnum, minnum, relnum, hdferr) 
-  IMPLICIT NONE
-  INTEGER, INTENT(IN)  :: majnum      ! The major version of the library
-  INTEGER, INTENT(IN)  :: minnum      ! The minor version of the library
-  INTEGER, INTENT(IN)  :: relnum      ! The release number 
-  INTEGER, INTENT(OUT) :: hdferr      ! Error code
-
-END SUBROUTINE h5check_version_f
-	
- - -
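A minimal illustrative C sketch (not part of the original entry): the version macros come from H5public.h, and the H5check macro mentioned above expands to an equivalent call.

        #include "hdf5.h"

        int main(void)
        {
            /* Aborts the application if the headers and the linked library
             * disagree on major, minor, or release number.                 */
            H5check_version(H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE);

            /* ... normal HDF5 usage ... */
            return 0;
        }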
- - - -
-
-
Name: H5close -
Signature: -
herr_t H5close(void) -
Purpose: -
Flushes all data to disk, closes file identifiers, and cleans up memory. -
Description: -
H5close flushes all data to disk, closes all file identifiers, and cleans up all memory used by the library. This function is generally called when the application calls exit(), but it may be called earlier in the event of an emergency shutdown or when the application wants to free all resources used by the HDF5 library. -

- h5close_f and h5open_f are - required calls in Fortran90 applications. -

Parameters: -
-
None. -
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5close_f -
-
-SUBROUTINE h5close_f(hdferr) 
-  IMPLICIT NONE
-  INTEGER, INTENT(OUT) :: hdferr      ! Error code
-
-END SUBROUTINE h5close_f
-	
- - -
- - - -
-
-
Name: H5dont_atexit -
Signature: -
herr_t H5dont_atexit(void) -
Purpose: -
Instructs library not to install atexit cleanup routine. -
Description: -
H5dont_atexit indicates to the library that an - atexit() cleanup routine should not be installed. - The major purpose for this is in situations where the - library is dynamically linked into an application and is - un-linked from the application before exit() gets - called. In those situations, a routine installed with - atexit() would jump to a routine which was - no longer in memory, causing errors. -

- In order to be effective, this routine must be called - before any other HDF function calls, and must be called each - time the library is loaded/linked into the application - (the first time and after it's been un-loaded). -

Parameters: -
-
None. -
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dont_atexit_f -
-
-SUBROUTINE h5dont_atexit_f(hdferr) 
-  IMPLICIT NONE
-  INTEGER, INTENT(OUT) :: hdferr      ! Error code
-
-END SUBROUTINE h5dont_atexit_f
-	
- - -
- - - -
-
-
Name: H5garbage_collect -
Signature: -
herr_t H5garbage_collect(void) -
Purpose: -
Garbage collects on all free-lists of all types. -
Description: -
H5garbage_collect walks through all the garbage - collection routines of the library, freeing any unused memory. -

- It is not required that H5garbage_collect be called - at any particular time; it is only necessary in certain situations - where the application has performed actions that cause the library - to allocate many objects. The application should call - H5garbage_collect if it eventually releases those - objects and wants to reduce the memory used by the library from - the peak usage required. -

- The library automatically garbage collects all the free lists - when the application ends. -

Parameters: -
-
None. -
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5garbage_collect_f -
-
-SUBROUTINE h5garbage_collect_f(hdferr) 
-  IMPLICIT NONE
-  INTEGER, INTENT(OUT) :: hdferr      ! Error code
-
-END SUBROUTINE h5garbage_collect_f
-	
- - -
- - - -
-
-
Name: H5get_libversion -
Signature: -
herr_t H5get_libversion(unsigned *majnum, - unsigned *minnum, - unsigned *relnum - ) -
Purpose: -
Returns the HDF library release number. -
Description: -
H5get_libversion retrieves the major, minor, and release - numbers of the version of the HDF library which is linked to - the application. -
Parameters: -
    - - - - - - - - - -
    unsigned *majnumOUT: The major version of the library.
    unsigned *minnum    OUT: The minor version of the library.
    unsigned *relnumOUT: The release number of the library.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5get_libversion_f -
-
-SUBROUTINE h5get_libversion_f(majnum, minnum, relnum, hdferr) 
-  IMPLICIT NONE
-  INTEGER, INTENT(OUT) :: majnum      ! The major version of the library
-  INTEGER, INTENT(OUT) :: minnum      ! The minor version of the library
-  INTEGER, INTENT(OUT) :: relnum      ! The release number 
-  INTEGER, INTENT(OUT) :: hdferr      ! Error code
-
-END SUBROUTINE h5get_libversion_f
-	
- - -
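A minimal illustrative C sketch (not part of the original entry) that retrieves and prints the linked library version:

        #include <stdio.h>
        #include "hdf5.h"

        void print_library_version(void)
        {
            unsigned majnum, minnum, relnum;

            if (H5get_libversion(&majnum, &minnum, &relnum) >= 0)
                printf("Linked against HDF5 %u.%u.%u\n", majnum, minnum, relnum);
        }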
- - - -
-
-
Name: H5open -
Signature: -
herr_t H5open(void) -
Purpose: -
Initializes the HDF5 library. -
Description: -
H5open initializes the library. -

- When the HDF5 Library is employed in a C application, this function is normally called automatically, but if you find that an HDF5 library function is failing inexplicably, try calling this function first. If you wish to eliminate this possibility, it is safe to routinely call H5open before an application starts working with the library, as there are no damaging side effects in calling it more than once. -

- When the HDF5 Library is employed in a Fortran90 application, - h5open_f initializes global variables - (e.g. predefined types) and performs other tasks required to - initialize the library. - h5open_f and h5close_f are therefore - required calls in Fortran90 applications. -

Parameters: -
-
None. -
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5open_f -
-
-SUBROUTINE h5open_f(hdferr) 
-  IMPLICIT NONE
-  INTEGER, INTENT(OUT) :: hdferr      ! Error code
-
-END SUBROUTINE h5open_f
-	
- - -
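A minimal illustrative C sketch (not part of the original entry) showing explicit initialization and shutdown; both calls are optional in C but harmless.

        #include "hdf5.h"

        int main(void)
        {
            H5open();      /* optional in C; safe to call more than once        */

            /* ... work with the library ... */

            H5close();     /* flush data, close identifiers, free library memory */
            return 0;
        }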
- - - -
-
-
Name: H5set_free_list_limits -
Signature: -
herr_t H5set_free_list_limits(int reg_global_lim, - int reg_list_lim, - int arr_global_lim, - int arr_list_lim, - int blk_global_lim, - int blk_list_lim - ) -
Purpose: -
Sets free-list size limits. -
Description: -
H5set_free_list_limits sets size limits - on all types of free lists. - The HDF5 library uses free lists internally to manage memory. - There are three types of free lists: -
  • Regular - free lists manage a single data structure. -
  • Array - free lists manage arrays of a data structure. -
  • Block - free lists manage blocks of bytes. -
-

- These are global limits, but each limit applies only to free lists of the specified type. Therefore, if an application sets a 1 MB limit on each of the global lists, up to 3 MB of total storage might be allocated, 1 MB for each of the regular, array, and block type lists. -

- Using a value of -1 for a limit means that - no limit is set for the specified type of free list. -

Parameters: -
    - - - - - - - - - - - - - - - - - - -
    int reg_global_lim    IN: The limit on all regular free list memory used
    int reg_list_limIN: The limit on memory used in each regular free list
    int arr_global_limIN: The limit on all array free list memory used
    int arr_list_limIN: The limit on memory used in each array free list
    int blk_global_limIN: The limit on all block free list memory used
    int blk_list_limIN: The limit on memory used in each block free list
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - -
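A minimal illustrative C sketch (not part of the original entry) matching the 1 MB example above; the 64 KB per-list limits are illustrative values, not recommendations.

        #include "hdf5.h"

        void limit_free_lists(void)
        {
            H5set_free_list_limits(1024*1024, 65536,    /* regular lists */
                                   1024*1024, 65536,    /* array lists   */
                                   1024*1024, 65536);   /* block lists   */
        }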
- - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - diff --git a/doc/html/RM_H5A.html b/doc/html/RM_H5A.html deleted file mode 100644 index 28a16ff..0000000 --- a/doc/html/RM_H5A.html +++ /dev/null @@ -1,954 +0,0 @@ - - -HDF5/H5A API Specification - - - - - - - - - - - - - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-

H5A: Attribute Interface

-
- -

Attribute API Functions

- -These functions create and manipulate attributes -and information about attributes. - -

-The C Interfaces: - - - -
- -       - -       - -
-
- -Alphabetical Listing - - - - - - - - - - - - - -
- -        - -        - -
- -
-The FORTRAN90 Interfaces: -
-In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
- - - -
- -       - -       - -
- - -

-The Attribute interface, H5A, is primarily designed to easily allow -small datasets to be attached to primary datasets as metadata information. -Additional goals for the H5A interface include keeping storage requirement -for each attribute to a minimum and easily sharing attributes among -datasets. -

-Because attributes are intended to be small objects, large datasets intended as additional information for a primary dataset should be stored as supplemental datasets in a group with the primary dataset. An attribute can then be attached to the group to indicate that a primary dataset with supplemental datasets is located in the group. How small is "small" is not defined by the library and is up to the user's interpretation. -

-See Attributes in the -HDF5 User's Guide for further information. - - - - - -


-
-
Name: H5Aclose -
Signature: -
herr_t H5Aclose(hid_t attr_id) -
Purpose: -
Closes the specified attribute. -
Description: -
H5Aclose terminates access to the attribute - specified by attr_id by releasing the identifier. -

- Further use of a released attribute identifier is illegal; - a function using such an identifier will fail. -

Parameters: -
    - - - -
    hid_t attr_idIN: Attribute to release access to.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5aclose_f -
-
-SUBROUTINE h5aclose_f(attr_id, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: attr_id   ! Attribute identifier 
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code:
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5aclose_f	
-	
- - -
- - - -
-
-
Name: H5Acreate -
Signature: -
hid_t H5Acreate(hid_t loc_id, - const char *name, - hid_t type_id, - hid_t space_id, - hid_t create_plist - ) -
Purpose: -
Creates a dataset as an attribute of another group, dataset, - or named datatype. -
Description: -
H5Acreate creates an attribute named name - and attached to the object specified with loc_id. - loc_id is a group, dataset, or named datatype identifier. -

- The attribute name specified in name must be unique. - Attempting to create an attribute with the same name as an already - existing attribute will fail, leaving the pre-existing attribute - in place. To overwrite an existing attribute with a new attribute - of the same name, first call H5Adelete then recreate - the attribute with H5Acreate. -

- The datatype and dataspace identifiers of the attribute, - type_id and space_id, respectively, - are created with the H5T and H5S interfaces, respectively. -

- Currently only simple dataspaces are allowed for attribute dataspaces. -

- The attribute creation property list, create_plist, - is currently unused; - it may be used in the future for optional attribute properties. - At this time, H5P_DEFAULT is the only accepted value. -

- The attribute identifier returned from this function must be released - with H5Aclose or resource leaks will develop. -
Parameters: -
    - - - - - - - - - - - - - - - -
    hid_t loc_idIN: Object (dataset, group, or named datatype) to be attached to.
    const char *nameIN: Name of attribute to create.
    hid_t type_idIN: Identifier of datatype for attribute.
    hid_t space_idIN: Identifier of dataspace for attribute.
    hid_t create_plist    IN: Identifier of creation property list. (Currently unused; - the only accepted value is H5P_DEFAULT.)
-
Returns: -
Returns an attribute identifier if successful; - otherwise returns a negative value. - - - -
Fortran90 Interface: h5acreate_f -
-
-SUBROUTINE h5acreate_f(obj_id, name, type_id, space_id, attr_id, & 
-                       hdferr, creation_prp) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id    ! Object identifier 
-  CHARACTER(LEN=*), INTENT(IN) :: name    ! Attribute name
-  INTEGER(HID_T), INTENT(IN) :: type_id   ! Attribute datatype identifier 
-  INTEGER(HID_T), INTENT(IN) :: space_id  ! Attribute dataspace identifier
-  INTEGER(HID_T), INTENT(OUT) :: attr_id  ! Attribute identifier 
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code:
-                                          ! 0 on success and -1 on failure
- -
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: creation_prp
-                                          ! Attribute creation property 
-                                          ! list identifier 
-END SUBROUTINE h5acreate_f
-	
-
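A minimal illustrative C sketch (not part of the original entry): attach a scalar integer attribute to an already-open object identifier dset; the attribute name "units" and the value are hypothetical.

        #include "hdf5.h"

        void add_units_attribute(hid_t dset)
        {
            hid_t space, attr;
            int   value = 42;

            space = H5Screate(H5S_SCALAR);
            attr  = H5Acreate(dset, "units", H5T_NATIVE_INT, space, H5P_DEFAULT);

            H5Awrite(attr, H5T_NATIVE_INT, &value);   /* write the whole attribute */

            H5Aclose(attr);    /* release identifiers to avoid resource leaks */
            H5Sclose(space);
        }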
- - - -
-
-
Name: H5Adelete -
Signature: -
herr_t H5Adelete(hid_t loc_id, - const char *name - ) -
Purpose: -
Deletes an attribute from a location. -
Description: -
H5Adelete removes the attribute specified by its - name, name, from a dataset, group, or named datatype. - This function should not be used when attribute identifiers are - open on loc_id as it may cause the internal indexes - of the attributes to change and future writes to the open - attributes to produce incorrect results. -
Parameters: -
    - - - - - - -
    hid_t loc_idIN: Identifier of the dataset, group, or named datatype - to have the attribute deleted from.
    const char *name    IN: Name of the attribute to delete.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5adelete_f -
-
-SUBROUTINE h5adelete_f(obj_id, name, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id    ! Object identifier 
-  CHARACTER(LEN=*), INTENT(IN) :: name    ! Attribute name
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code:
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5adelete_f
-	
- - -
- - - -
-
-
Name: H5Aget_name -
Signature: -
ssize_t H5Aget_name(hid_t attr_id, - size_t buf_size, - char *buf - ) -
Purpose: -
Gets an attribute name. -
Description: -
H5Aget_name retrieves the name of an attribute - specified by the identifier, attr_id. - Up to buf_size characters are stored in - buf followed by a \0 string - terminator. If the name of the attribute is longer than - (buf_size -1), the string terminator is stored in the - last position of the buffer to properly terminate the string. -
Parameters: -
    - - - - - - - - - -
    hid_t attr_idIN: Identifier of the attribute.
    size_t buf_size    IN: The size of the buffer to store the name in.
    char *bufIN: Buffer to store name in.
-
Returns: -
Returns the length of the attribute's name, which may be - longer than buf_size, if successful. - Otherwise returns a negative value. -
Fortran90 Interface: h5aget_name_f -
-
-SUBROUTINE h5aget_name_f(attr_id, size, buf, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: attr_id  ! Attribute identifier 
-  INTEGER, INTENT(IN) :: size            ! Buffer size 
-  CHARACTER(LEN=*), INTENT(OUT) :: buf   ! Buffer to hold attribute name
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code: name length  
-                                         ! on success and -1 on failure
-END SUBROUTINE h5aget_name_f
-	
- - -
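An illustrative sketch (not part of the original entry) of the common two-step pattern: query the name length first, then allocate a buffer large enough for the name plus the terminating '\0'.

        #include <stdlib.h>
        #include "hdf5.h"

        char *get_attribute_name(hid_t attr)
        {
            ssize_t len  = H5Aget_name(attr, 0, NULL);   /* length only */
            char   *name = NULL;

            if (len >= 0) {
                name = (char *) malloc((size_t)len + 1);
                H5Aget_name(attr, (size_t)len + 1, name);
            }
            return name;   /* caller frees the returned string */
        }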
- - - -
-
-
Name: H5Aget_num_attrs -
Signature: -
int H5Aget_num_attrs(hid_t loc_id) -
Purpose: -
Determines the number of attributes attached to an object. -
Description: -
H5Aget_num_attrs returns the number of attributes - attached to the object specified by its identifier, - loc_id. - The object can be a group, dataset, or named datatype. -
Parameters: -
    - - - -
    hid_t loc_id    IN: Identifier of a group, dataset, or named datatype.
-
Returns: -
Returns the number of attributes if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5aget_num_attrs_f -
-
-SUBROUTINE h5aget_num_attrs_f(obj_id, attr_num, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id  ! Object identifier 
-  INTEGER, INTENT(OUT) :: attr_num      ! Number of attributes of the object
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code:
-                                        ! 0 on success and -1 on failure
-END SUBROUTINE h5aget_num_attrs_f
-	
- - -
- - - -
-
-
Name: H5Aget_space -
Signature: -
hid_t H5Aget_space(hid_t attr_id) -
Purpose: -
Gets a copy of the dataspace for an attribute. -
Description: -
H5Aget_space retrieves a copy of the dataspace - for an attribute. The dataspace identifier returned from - this function must be released with H5Sclose - or resource leaks will develop. -
Parameters: -
    - - - -
    hid_t attr_id    IN: Identifier of an attribute.
-
Returns: -
Returns attribute dataspace identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5aget_space_f -
-
-SUBROUTINE h5aget_space_f(attr_id, space_id, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: attr_id   ! Attribute identifier 
-  INTEGER(HID_T), INTENT(OUT) :: space_id ! Attribute dataspace identifier
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code:
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5aget_space_f
-	
- - -
- - - -
-
-
Name: H5Aget_type -
Signature: -
hid_t H5Aget_type(hid_t attr_id) -
Purpose: -
Gets an attribute datatype. -
Description: -
H5Aget_type retrieves a copy of the datatype - for an attribute. -

- The datatype is reopened if it is a named type before returning - it to the application. The datatypes returned by this function - are always read-only. If an error occurs when atomizing the - return datatype, then the datatype is closed. -

- The datatype identifier returned from this function must be - released with H5Tclose or resource leaks will develop. -

Parameters: -
    - - - -
    hid_t attr_id    IN: Identifier of an attribute.
-
Returns: -
Returns a datatype identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5aget_type_f -
-
-SUBROUTINE h5aget_type_f(attr_id, type_id, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: attr_id  ! Attribute identifier 
-  INTEGER(HID_T), INTENT(OUT) :: type_id ! Attribute datatype identifier
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code:
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5aget_type_f
-	
- - -
- - - -
-
-
Name: H5Aiterate -
Signature: -
herr_t H5Aiterate(hid_t loc_id, - unsigned * idx, - H5A_operator_t op, - void *op_data - ) -
Purpose: -
Calls a user's function for each attribute on an object. -
Description: -
H5Aiterate iterates over the attributes of - the object specified by its identifier, loc_id. - The object can be a group, dataset, or named datatype. - For each attribute of the object, the op_data - and some additional information specified below are passed - to the operator function op. - The iteration begins with the attribute specified by its - index, idx; the index for the next attribute - to be processed by the operator, op, is - returned in idx. - If idx is the null pointer, then all attributes - are processed. -

- The prototype for H5A_operator_t is:
- typedef herr_t (*H5A_operator_t)(hid_t loc_id, - const char *attr_name, - void *operator_data); - -

- The operation receives the identifier for the group, dataset, or named datatype being iterated over, loc_id; the name of the current attribute of the object, attr_name; and the pointer to the operator data passed in to H5Aiterate, op_data. The return values from an operator are: -

    -
  • Zero causes the iterator to continue, returning zero when all - attributes have been processed. -
  • Positive causes the iterator to immediately return that positive - value, indicating short-circuit success. The iterator can be - restarted at the next attribute. -
  • Negative causes the iterator to immediately return that value, - indicating failure. The iterator can be restarted at the next - attribute. -
-
Parameters: -
    - - - - - - - - - - - - -
    hid_t loc_idIN: Identifier of a group, dataset or named datatype.
    unsigned * idxIN/OUT: Starting (IN) and ending (OUT) attribute index.
    H5A_operator_t op    IN: User's function to pass each attribute to
    void *op_dataIN/OUT: User's data to pass through to iterator operator function
-
Returns: -
If successful, returns the return value of the last operator - if it was non-zero, or zero if all attributes were processed. - Otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - -
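An illustrative C sketch (not part of the original entry) of an H5A_operator_t callback matching the prototype shown above; print_attr and list_attributes are hypothetical names.

        #include <stdio.h>
        #include "hdf5.h"

        /* Print each attribute name; returning zero continues the iteration. */
        static herr_t print_attr(hid_t loc_id, const char *attr_name, void *op_data)
        {
            (void)loc_id;
            (void)op_data;
            printf("attribute: %s\n", attr_name);
            return 0;
        }

        void list_attributes(hid_t obj_id)
        {
            unsigned idx = 0;                    /* start at the first attribute */
            H5Aiterate(obj_id, &idx, print_attr, NULL);
        }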
- - - -
-
-
Name: H5Aopen_idx -
Signature: -
hid_t H5Aopen_idx(hid_t loc_id, - unsigned int idx - ) -
Purpose: -
Opens the attribute specified by its index. -
Description: -
H5Aopen_idx opens an attribute which is attached - to the object specified with loc_id. - The location object may be either a group, dataset, or - named datatype, all of which may have any sort of attribute. - The attribute specified by the index, idx, - indicates the attribute to access. - The value of idx is a 0-based, non-negative integer. - The attribute identifier returned from this function must be - released with H5Aclose or resource leaks will develop. -
Parameters: -
    - - - - - - -
    hid_t loc_idIN: Identifier of the group, dataset, or named datatype that the attribute is attached to.
    unsigned int idx    IN: Index of the attribute to open.
-
Returns: -
Returns attribute identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5aopen_idx_f -
-
-SUBROUTINE h5aopen_idx_f(obj_id, index, attr_id, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id    ! Object identifier 
-  INTEGER, INTENT(IN) :: index            ! Attribute index 
-  INTEGER(HID_T), INTENT(OUT) :: attr_id  ! Attribute identifier 
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code:
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5aopen_idx_f
-	
- - -
- - - -
-
-
Name: H5Aopen_name -
Signature: -
hid_t H5Aopen_name(hid_t loc_id, - const char *name - ) -
Purpose: -
Opens an attribute specified by name. -
Description: -
H5Aopen_name opens an attribute specified by - its name, name, which is attached to the - object specified with loc_id. - The location object may be either a group, dataset, or - named datatype, which may have any sort of attribute. - The attribute identifier returned from this function must - be released with H5Aclose or resource leaks - will develop. -
Parameters: -
    - - - - - - -
    hid_t loc_idIN: Identifier of a group, dataset, or named datatype that the attribute is attached to.
    const char *name    IN: Attribute name.
-
Returns: -
Returns attribute identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5aopen_name_f -
-
-SUBROUTINE h5aopen_name_f(obj_id, name, attr_id, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id    ! Object identifier 
-  CHARACTER(LEN=*), INTENT(IN) :: name    ! Attribute name
-  INTEGER(HID_T), INTENT(OUT) :: attr_id  ! Attribute identifier 
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code:
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5aopen_name_f
-	
- - -
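A minimal illustrative C sketch (not part of the original entry): open an attribute by name on an already-open object and read its value; the attribute name "units" is hypothetical.

        #include "hdf5.h"

        int read_units_attribute(hid_t obj_id)
        {
            hid_t attr  = H5Aopen_name(obj_id, "units");
            int   value = 0;

            H5Aread(attr, H5T_NATIVE_INT, &value);   /* entire attribute is read */
            H5Aclose(attr);
            return value;
        }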
- - - -
-
-
Name: H5Aread -
Signature: -
herr_t H5Aread(hid_t attr_id, - hid_t mem_type_id, - void *buf - ) -
Purpose: -
Reads an attribute. -
Description: -
H5Aread reads an attribute, specified with - attr_id. The attribute's memory datatype - is specified with mem_type_id. The entire - attribute is read into buf from the file. -

- Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

Parameters: -
    - - - - - - - - - -
    hid_t attr_idIN: Identifier of an attribute to read.
    hid_t mem_type_id    IN: Identifier of the attribute datatype (in memory).
    void *bufOUT: Buffer for data to be read.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5aread_f -
-
-SUBROUTINE h5aread_f(attr_id, memtype_id,  buf, dims, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: attr_id    ! Attribute identifier 
-  INTEGER(HID_T), INTENT(IN) :: memtype_id ! Attribute datatype 
-                                           ! identifier  (in memory)
-  TYPE, INTENT(INOUT)  :: buf              ! Data buffer; may be a scalar or 
-                                           ! an array
-  INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN)  :: dims 
-                                           ! Array to hold corresponding 
-                                           ! dimension sizes of data buffer buf;
-                                           ! dim(k) has value of the 
-                                           ! k-th dimension of buffer buf;
-                                           ! values are ignored if buf is a 
-                                           ! scalar
-  INTEGER, INTENT(OUT) :: hdferr           ! Error code:
-                                           ! 0 on success and -1 on failure
-END SUBROUTINE h5aread_f
-	
- - -
- - - -
-
-
Name: H5Awrite -
Signature: -
herr_t H5Awrite(hid_t attr_id, - hid_t mem_type_id, - const void *buf - ) -
Purpose: -
Writes data to an attribute. -
Description: -
H5Awrite writes an attribute, specified with - attr_id. The attribute's memory datatype - is specified with mem_type_id. The entire - attribute is written from buf to the file. -

- Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

Parameters: -
    - - - - - - - - - -
    hid_t attr_idIN: Identifier of an attribute to write.
    hid_t mem_type_id    IN: Identifier of the attribute datatype (in memory).
    const void *bufIN: Data to be written.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5awrite_f -
-
-SUBROUTINE h5awrite_f(attr_id, memtype_id,  buf, dims, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: attr_id    ! Attribute identifier 
-  INTEGER(HID_T), INTENT(IN) :: memtype_id ! Attribute datatype 
-                                           ! identifier  (in memory)
-  TYPE, INTENT(IN) :: buf                  ! Data buffer; may be a scalar or 
-                                           ! an array
-  INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN)  :: dims 
-                                           ! Array to hold corresponding 
-                                           ! dimension sizes of data buffer buf;
-                                           ! dim(k) has value of the k-th 
-                                           ! dimension of buffer buf;
-                                           ! values are ignored if buf is 
-                                           ! a scalar
-  INTEGER, INTENT(OUT) :: hdferr           ! Error code:
-                                           ! 0 on success and -1 on failure
-END SUBROUTINE h5awrite_f
-	
- - -
- -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - diff --git a/doc/html/RM_H5D.html b/doc/html/RM_H5D.html deleted file mode 100644 index f6e313a..0000000 --- a/doc/html/RM_H5D.html +++ /dev/null @@ -1,1584 +0,0 @@ - - -HDF5/H5D API Specification - - - - - - - - - - - - - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-

H5D: Datasets Interface

-
- -

Dataset Object API Functions

- -These functions create and manipulate dataset objects, -and set and retrieve their constant or persistent properties. - -

-The C Interfaces: - - - -
- -       - -       - -
-
- -Alphabetical Listing - - - - - - - - - - - - - -
- -        - -        - -
- -
-The FORTRAN90 Interfaces: -
-In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
- - - -
- -       - -       - -
- - - - - -


-
-
Name: H5Dclose -
Signature: -
herr_t H5Dclose(hid_t dataset_id - ) -
Purpose: -
Closes the specified dataset. -
Description: -
H5Dclose ends access to a dataset specified by - dataset_id and releases resources used by it. - Further use of the dataset identifier is illegal in calls to - the dataset API. -
Parameters: -
    - - - -
    hid_t dataset_id    IN: Identifier of the dataset to close access to.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dclose_f -
-
-SUBROUTINE h5dclose_f(dset_id, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: dset_id ! Dataset identifier  
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code  
-                                        ! 0 on success and -1 on failure
-END SUBROUTINE h5dclose_f
-	
- - -
- - - -
-
-
Name: H5Dcreate -
Signature: -
hid_t H5Dcreate(hid_t loc_id, - const char *name, - hid_t type_id, - hid_t space_id, - hid_t create_plist_id - ) -
Purpose: -
Creates a dataset at the specified location. -
Description: -
H5Dcreate creates a data set with a name, - name, in the file or in the group specified by - the identifier loc_id. - The dataset has the datatype and dataspace identified by - type_id and space_id, respectively. - The specified datatype and dataspace are the datatype and - dataspace of the dataset as it will exist in the file, - which may be different than in application memory. - Dataset creation properties are specified by the argument - create_plist_id. -

- Dataset names within a group are unique: - H5Dcreate will return an error if a dataset with - the name specified in name already exists at the - location specified in loc_id. -

- create_plist_id is a H5P_DATASET_CREATE - property list created with H5Pcreate and - initialized with the various functions described above. -

- H5Dcreate returns an error if the dataset's datatype - includes a variable-length (VL) datatype and the fill value - is undefined, i.e., set to NULL in the - dataset creation property list. - Such a VL datatype may be directly included, - indirectly included as part of a compound or array datatype, or - indirectly included as part of a nested compound or array datatype. -

- H5Dcreate returns a dataset identifier for success - or a negative value for failure. - The dataset identifier should eventually be closed by - calling H5Dclose to release resources it uses. -

- Fill values and space allocation:
- The HDF5 library provides flexible means - of specifying a fill value, - of specifying when space will be allocated for a dataset, and - of specifying when fill values will be written to a dataset. - For further information on these topics, see the document - - Fill Value and Dataset Storage Allocation Issues in HDF5 - and the descriptions of the following HDF5 functions in this - HDF5 Reference Manual: - - -
  - - H5Dfill
- H5Pset_fill_value
- H5Pget_fill_value
- H5Pfill_value_defined -
- H5Pset_fill_time
- H5Pget_fill_time
- H5Pset_alloc_time
- H5Pget_alloc_time -
- This information is also included in the - “HDF5 Datasets” chapter of - the new HDF5 User's Guide, - which is being prepared for release. -

Note: -
H5Dcreate can fail if there has been an error in setting up an element of the dataset creation property list. In such cases, each item in the property list must be examined to ensure that the setup satisfies all required conditions. This problem is most likely to occur with the use of filters. -

- For example, H5Dcreate will fail without a meaningful - explanation if -

    -
  • SZIP compression is being used on the dataset and -
  • the SZIP parameter pixels_per_block - is set to an inappropriate value. -
-

- In such a case, one would refer to the description of - H5Pset_szip, - looking for any conditions or requirements that might affect the - local computing environment. - -

Parameters: -
    - - - - - - - - - - - - - - - -
    hid_t loc_idIN: Identifier of the file or group - within which to create the dataset.
    const char * nameIN: The name of the dataset to create.
    hid_t type_idIN: Identifier of the datatype to use - when creating the dataset.
    hid_t space_idIN: Identifier of the dataspace to use - when creating the dataset.
    hid_t create_plist_id    IN: Identifier of the dataset creation property list.
- -
Returns: -
Returns a dataset identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dcreate_f -
-
-SUBROUTINE h5dcreate_f(loc_id, name, type_id, space_id, dset_id, & 
-                       hdferr, creation_prp) 
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN) :: loc_id   ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the dataset 
-  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
-  INTEGER(HID_T), INTENT(IN) :: space_id ! Dataspace identifier
-  INTEGER(HID_T), INTENT(OUT) :: dset_id ! Dataset identifier
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: creation_prp
-                                         ! Dataset creation property 
-                                         ! list identifier; default
-                                         ! value is H5P_DEFAULT_F (6) 
-END SUBROUTINE h5dcreate_f
-	
- - -
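A minimal illustrative C sketch (not part of the original entry): create a 10x20 dataset of native integers in an already-open file or group loc_id; "ints" is a hypothetical dataset name.

        #include "hdf5.h"

        void create_int_dataset(hid_t loc_id)
        {
            hsize_t dims[2] = {10, 20};
            hid_t   space, dset;

            space = H5Screate_simple(2, dims, NULL);
            dset  = H5Dcreate(loc_id, "ints", H5T_NATIVE_INT, space, H5P_DEFAULT);

            H5Dclose(dset);      /* release the dataset identifier */
            H5Sclose(space);
        }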
- - - -
-
-
Name: H5Dextend -
Signature: -
herr_t H5Dextend(hid_t dataset_id, - const hsize_t * size - ) -
Purpose: -
Extends a dataset with unlimited dimension. -
Description: -
H5Dextend verifies that the dataset is at least of size size, extending it if necessary. The dimensionality of size is the same as that of the dataspace of the dataset being changed. This function cannot be applied to a dataset with fixed dimensions. -

- Space on disk is immediately allocated for the new dataset extent - if the dataset's space allocation time is set to - H5D_ALLOC_TIME_EARLY. - Fill values will be written to the dataset if the dataset's fill time - is set to H5D_FILL_TIME_IFSET or - H5D_FILL_TIME_ALLOC. - (Also see - H5Pset_fill_time - and - H5Pset_alloc_time.) - -

Parameters: -
    - - - - - - -
    hid_t dataset_idIN: Identifier of the dataset.
    const hsize_t * size    IN: Array containing the new magnitude of each dimension.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dextend_f -
-
-SUBROUTINE h5dextend_f(dataset_id, size, hdferr) 
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN) :: dataset_id   ! Dataset identifier
-  INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN)  :: size
-                                             ! Array containing 
-                                             ! dimensions' sizes 
-  INTEGER, INTENT(OUT) :: hdferr             ! Error code 
-                                             ! 0 on success and -1 on failure
-END SUBROUTINE h5dextend_f 
-	
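A minimal illustrative C sketch (not part of the original entry): grow the first dimension of a two-dimensional dataset to 200 rows. The dataset is assumed to have been created with an unlimited (or sufficiently large) maximum for that dimension and a chunked layout.

        #include "hdf5.h"

        void grow_dataset(hid_t dset)
        {
            hsize_t new_size[2] = {200, 20};   /* new extent of each dimension */
            H5Dextend(dset, new_size);
        }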
- - -
- - - -
-
-
Name: H5Dfill -
Signature: -
herr_t H5Dfill( - const void *fill, - hid_t fill_type_id, - void *buf, - hid_t buf_type_id, - hid_t space_id - ) -
Purpose: -
Fills dataspace elements with a fill value in a memory buffer. -
Description: -
H5Dfill explicitly fills - the dataspace selection in memory, space_id, - with the fill value specified in fill. - If fill is NULL, - a fill value of 0 (zero) is used. -

- fill_type_id specifies the datatype - of the fill value.
- buf specifies the buffer in which - the dataspace elements will be written.
- buf_type_id specifies the datatype of - those data elements. -

- Note that if the fill value datatype differs - from the memory buffer datatype, the fill value - will be converted to the memory buffer datatype - before filling the selection. -

Note: -
Applications sometimes write data only to portions of - an allocated dataset. It is often useful in such cases - to fill the unused space with a known - fill value. - See H5Pset_fill_value - for further discussion. - Other related functions include - H5Pget_fill_value, - H5Pfill_value_defined, - H5Pset_fill_time, - H5Pget_fill_time, - and - H5Dcreate. -
Parameters: -
    - - - - - - - - - - - - - - - -
    const void *fillIN: Pointer to the fill value to be used.
    hid_t fill_type_id    IN: Fill value datatype identifier.
    void *bufIN/OUT: Pointer to the memory buffer containing the - selection to be filled.
    hid_t buf_type_idIN: Datatype of dataspace elements to be filled.
    hid_t space_idIN: Dataspace describing memory buffer and - containing the selection to be filled.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dfill_f -
-
-SUBROUTINE h5dfill_f(fill_value, space_id, buf, hdferr)
-  IMPLICIT NONE
-  TYPE, INTENT(IN) :: fill_value         ! Fill value; may have one of the
-                                         ! following types:
-                                         ! INTEGER, REAL, DOUBLE PRECISION, 
-                                         ! CHARACTER
-  INTEGER(HID_T), INTENT(IN) :: space_id ! Memory dataspace selection identifier 
-  TYPE, DIMENSION(*) :: buf              ! Memory buffer to fill in; must have
-                                         ! the same datatype as fill value
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code  
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5dfill_f
-	
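A minimal illustrative C sketch (not part of the original entry): fill an in-memory buffer with the value -1. A newly created simple dataspace selects all elements by default, so the whole buffer is filled here.

        #include "hdf5.h"

        void fill_buffer_example(void)
        {
            hsize_t dims[1] = {100};
            int     buf[100];
            int     fillval = -1;
            hid_t   space   = H5Screate_simple(1, dims, NULL);

            H5Dfill(&fillval, H5T_NATIVE_INT, buf, H5T_NATIVE_INT, space);
            H5Sclose(space);
        }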
- - -
- - - -
-
-
Name: H5Dget_create_plist -
Signature: -
hid_t H5Dget_create_plist(hid_t dataset_id - ) -
Purpose: -
Returns an identifier for a copy of the - dataset creation property list for a dataset. -
Description: -
H5Dget_create_plist returns an identifier for a - copy of the dataset creation property list for a dataset. - The creation property list identifier should be released with - the H5Pclose function. -
Parameters: -
    - - - -
    hid_t dataset_id    IN: Identifier of the dataset to query.
-
Returns: -
Returns a dataset creation property list identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dget_create_plist_f -
-
-SUBROUTINE h5dget_create_plist_f(dataset_id, creation_prp, hdferr) 
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN) :: dataset_id    ! Dataset identifier
-  INTEGER(HID_T), INTENT(OUT) :: creation_prp ! Dataset creation
-                                              ! property list identifier
-  INTEGER, INTENT(OUT) :: hdferr              ! Error code 
-                                              ! 0 on success and -1 on failure
-END SUBROUTINE h5dget_create_plist_f  
-
-	
- - -
- - - -
-
-
Name: H5Dget_offset -
Signature: -
haddr_t H5Dget_offset(hid_t dset_id) -
Purpose: -
Returns dataset address in file. -
Description: -
H5Dget_offset returns the address in the file - of the dataset dset_id. - That address is expressed as the offset in bytes from - the beginning of the file. -
Parameters: -
    - - - -
    hid_t dset_id    Dataset identifier.
-
Returns: -
Returns the offset in bytes; - otherwise returns HADDR_UNDEF, a negative value. -
Fortran90 Interface: -
None. - - - -
- - - -
-
-
Name: H5Dget_space -
Signature: -
hid_t H5Dget_space(hid_t dataset_id - ) -
Purpose: -
Returns an identifier for a copy of the dataspace for a dataset. -
Description: -
H5Dget_space returns an identifier for a copy of the - dataspace for a dataset. - The dataspace identifier should be released with the - H5Sclose function. -
Parameters: -
    - - - -
    hid_t dataset_id    IN: Identifier of the dataset to query.
-
Returns: -
Returns a dataspace identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dget_space_f -
-
-SUBROUTINE h5dget_space_f(dataset_id, dataspace_id, hdferr) 
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN) :: dataset_id      ! Dataset identifier
-  INTEGER(HID_T), INTENT(OUT) :: dataspace_id   ! Dataspace identifier
-  INTEGER, INTENT(OUT) :: hdferr                ! Error code 
-                                                ! 0 on success and -1 on failure
-END SUBROUTINE h5dget_space_f
-	
- - -
- - - -
-
-
Name: H5Dget_space_status -
Signature: -
herr_t H5Dget_space_status(hid_t dset_id, - H5D_space_status_t *status) -
Purpose: -
Determines whether space has been allocated for a dataset. -
Description: -
H5Dget_space_status determines whether space has been - allocated for the dataset dset_id. -

- Space allocation status is returned in status, - which will have one of the following values: -

- - -
     - H5D_SPACE_STATUS_NOT_ALLOCATED - - Space has not been allocated for this dataset. -
- H5D_SPACE_STATUS_ALLOCATED - - Space has been allocated for this dataset. -
- H5D_SPACE_STATUS_PART_ALLOCATED   - - Space has been partially allocated for this dataset. - (Used only for datasets with chunked storage.) -
-
-
Parameters: -
    - - - - - - -
    hid_t dset_idIN: Identifier of the dataset to query.
    H5D_space_status_t *status    OUT: Space allocation status.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dget_space_status_f -
-
-SUBROUTINE h5dget_space_status_f(dset_id, flag, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: dset_id  ! Dataset identifier 
-  INTEGER, INTENT(OUT)       :: flag     ! Status flag; possible values:
-                                         ! H5D_SPACE_STS_ERROR_F
-                                         ! H5D_SPACE_STS_NOT_ALLOCATED_F
-                                         ! H5D_SPACE_STS_PART_ALLOCATED_F
-                                         ! H5D_SPACE_STS_ALLOCATED_F
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code  
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5dget_space_status_f
-	
- - -
- - - -
-
-
Name: H5Dget_storage_size -
Signature: -
hsize_t H5Dget_storage_size(hid_t dataset_id - ) -
Purpose: -
Returns the amount of storage required for a dataset. -
Description: -
H5Dget_storage_size returns the amount of storage - that is required for the specified dataset, dataset_id. - For chunked datasets, this is the number of allocated chunks times - the chunk size. - The return value may be zero if no data has been stored. -
Parameters: -
    - - - -
    hid_t dataset_id    IN: Identifier of the dataset to query.
-
Returns: -
Returns the amount of storage space allocated for the dataset, - not counting meta data; - otherwise returns 0 (zero). -
Fortran90 Interface: h5dget_storage_size_f -
-
-SUBROUTINE h5dget_storage_size_f(dset_id, size, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: dset_id  ! Dataset identifier  
-  INTEGER(HSIZE_T), INTENT(OUT)  :: size ! Amount of storage required 
-                                         ! for dataset
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code  
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5dget_storage_size_f
-	
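A C sketch comparing the storage actually allocated in the file with the logical (in-memory) size of the data; dset is an assumed open dataset identifier:

#include <hdf5.h>
#include <stdio.h>

/* Compare allocated file storage with the dataset's logical data size. */
static void report_storage(hid_t dset)
{
    hsize_t  stored  = H5Dget_storage_size(dset);   /* bytes allocated in the file */
    hid_t    space   = H5Dget_space(dset);
    hid_t    type    = H5Dget_type(dset);
    hssize_t npoints = H5Sget_simple_extent_npoints(space);
    size_t   tsize   = H5Tget_size(type);

    printf("allocated: %llu bytes, logical: %llu bytes\n",
           (unsigned long long)stored,
           (unsigned long long)(npoints * (hssize_t)tsize));

    H5Tclose(type);
    H5Sclose(space);
}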
- - -
- - - -
-
-
Name: H5Dget_type -
Signature: -
hid_t H5Dget_type(hid_t dataset_id - ) -
Purpose: -
Returns an identifier for a copy of the datatype for a dataset. -
Description: -
H5Dget_type returns an identifier for a copy of the - datatype for a dataset. - The datatype should be released with the H5Tclose function. -

- If a dataset has a named datatype, then an identifier to the - opened datatype is returned. - Otherwise, the returned datatype is read-only. - If atomization of the datatype fails, then the datatype is closed. -

Parameters: -
    - - - -
    hid_t dataset_id    IN: Identifier of the dataset to query.
-
Returns: -
Returns a datatype identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dget_type_f -
-
-SUBROUTINE h5dget_type_f(dataset_id, datatype_id, hdferr) 
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN) :: dataset_id    ! Dataset identifier
-  INTEGER(HID_T), INTENT(OUT) :: datatype_id  ! Datatype identifier
-  INTEGER, INTENT(OUT) :: hdferr              ! Error code 
-                                              ! 0 on success and -1 on failure
-END SUBROUTINE h5dget_type_f 
-	
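A C sketch of the query-and-release pattern described above, using H5Tequal to compare the returned copy against a native type; dset is an assumed open dataset identifier:

#include <hdf5.h>
#include <stdio.h>

/* Inspect the datatype of an open dataset and release the copy. */
static void inspect_type(hid_t dset)
{
    hid_t type = H5Dget_type(dset);   /* copy of the dataset's datatype */

    printf("element size: %lu bytes\n", (unsigned long)H5Tget_size(type));
    if (H5Tequal(type, H5T_NATIVE_INT) > 0)
        printf("datatype matches the platform's native int\n");

    H5Tclose(type);                   /* always release the copy */
}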
- - -
- - - -
-
-
Name: H5Diterate -
Signature: -
herr_t H5Diterate( - void *buf, - hid_t type_id, - hid_t space_id, - H5D_operator_t operator, - void *operator_data - ) -
Purpose: -
Iterates over all selected elements in a dataspace. -
Description: -
H5Diterate iterates over all the elements selected - in a memory buffer. The callback function is called once for each - element selected in the dataspace. -

- The selection in the dataspace is modified so that any elements - already iterated over are removed from the selection if the - iteration is interrupted (by the H5D_operator_t - function returning non-zero) before the iteration is complete; - the iteration may then be re-started by the user where it left off. - -

Parameters: -
    - - - - - - - - - - - - - - - -
    void *bufIN/OUT: Pointer to the buffer in memory containing the - elements to iterate over.
    hid_t type_idIN: Datatype identifier for the elements stored in - buf.
    hid_t space_idIN: Dataspace identifier for buf. - Also contains the selection to iterate over.
    H5D_operator_t operator    IN: Function pointer to the routine to be called - for each element in buf iterated over.
    void *operator_dataIN/OUT: Pointer to any user-defined data associated - with the operation.
-
Returns: -
Returns the return value of the last operator if it was non-zero, - or zero if all elements have been processed. - Otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - -
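A C sketch of an iteration callback, assuming the H5D_operator_t prototype herr_t op(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, void *op_data); the buffer and dataspace selection are assumed to hold native int elements:

#include <hdf5.h>

/* Callback: accumulate each selected int element into *op_data.
 * Returning 0 continues the iteration; non-zero would stop it. */
static herr_t sum_op(void *elem, hid_t type_id, unsigned ndim,
                     const hsize_t *point, void *op_data)
{
    (void)type_id; (void)ndim; (void)point;
    *(long *)op_data += *(int *)elem;
    return 0;
}

/* Sum all elements selected in 'space' within the int buffer 'buf'. */
static long sum_selection(int *buf, hid_t space)
{
    long total = 0;
    H5Diterate(buf, H5T_NATIVE_INT, space, sum_op, &total);
    return total;
}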
- - - -
-
-
Name: H5Dopen -
Signature: -
hid_t H5Dopen(hid_t loc_id, - const char *name - ) -
Purpose: -
Opens an existing dataset. -
Description: -
H5Dopen opens an existing dataset for access in the file - or group specified in loc_id. name is - a dataset name and is used to identify the dataset in the file. -
Parameters: -
    - - - - - - -
    hid_t loc_idIN: Identifier of the file or group - within which the dataset to be accessed will be found.
    const char * name    IN: The name of the dataset to access.
-
Returns: -
Returns a dataset identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dopen_f -
-
-SUBROUTINE h5dopen_f(loc_id, name, dset_id, hdferr) 
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN) :: loc_id   ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the dataset 
-  INTEGER(HID_T), INTENT(OUT) :: dset_id ! Dataset identifier
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5dopen_f
-	
- - -
- - - -
-
-
Name: H5Dread -
Signature: -
herr_t H5Dread(hid_t dataset_id, - hid_t mem_type_id, - hid_t mem_space_id, - hid_t file_space_id, - hid_t xfer_plist_id, - void * buf - ) -
Purpose: -
Reads raw data from a dataset into a buffer. -
Description: -
H5Dread reads a (partial) dataset, specified by its - identifier dataset_id, from the - file into an application memory buffer buf. - Data transfer properties are defined by the argument - xfer_plist_id. - The memory datatype of the (partial) dataset is identified by - the identifier mem_type_id. - The part of the dataset to read is defined by - mem_space_id and file_space_id. -

- file_space_id is used to specify only the selection within - the file dataset's dataspace. Any dataspace specified in file_space_id - is ignored by the library and the dataset's dataspace is always used. - file_space_id can be the constant H5S_ALL, - which indicates that the entire file dataspace, as defined by the - current dimensions of the dataset, is to be selected. -

- mem_space_id is used to specify both the memory dataspace - and the selection within that dataspace. - mem_space_id can be the constant H5S_ALL, - in which case the file dataspace is used for the memory dataspace and - the selection defined with file_space_id is used for the - selection within that dataspace. -

- If raw data storage space has not been allocated for the dataset - and a fill value has been defined, the returned buffer buf - is filled with the fill value. -

- The behavior of the library for the various combinations of valid - dataspace identifiers and H5S_ALL for the mem_space_id and the - file_space_id parameters is described below: - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- mem_space_id   - - file_space_id   - - Behavior -
- valid dataspace identifier - - valid dataspace identifier - - mem_space_id specifies the memory dataspace and the - selection within it. - file_space_id specifies the selection within the file - dataset's dataspace. -
- H5S_ALL - - valid dataspace identifier - - The file dataset's dataspace is used for the memory dataspace and the - selection specified with file_space_id specifies the - selection within it. - The combination of the file dataset's dataspace and the selection from - file_space_id is used for memory also. -
- valid dataspace identifier - - H5S_ALL - - mem_space_id specifies the memory dataspace and the - selection within it. - The selection within the file dataset's dataspace is set to the "all" - selection. -
- H5S_ALL - - H5S_ALL - - The file dataset's dataspace is used for the memory dataspace and the - selection within the memory dataspace is set to the "all" selection. - The selection within the file dataset's dataspace is set to the "all" - selection. -
- -

- Setting an H5S_ALL selection indicates that the entire dataspace, as - defined by the current dimensions of a dataspace, will be selected. - The number of elements selected in the memory dataspace must match the - number of elements selected in the file dataspace. -

- xfer_plist_id can be the constant H5P_DEFAULT, - in which case the default data transfer properties are used. -

- Data is automatically converted from the file datatype - and dataspace to the memory datatype and dataspace - at the time of the read. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

Parameters: -
    - - - - - - - - - - - - - - - - - - -
    hid_t dataset_idIN: Identifier of the dataset read from.
    hid_t mem_type_idIN: Identifier of the memory datatype.
    hid_t mem_space_idIN: Identifier of the memory dataspace.
    hid_t file_space_id    IN: Identifier of the dataset's dataspace in the file.
    hid_t xfer_plist_id    IN: Identifier of a transfer property list - for this I/O operation.
    void * bufOUT: Buffer to receive data read from file.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dread_f - -
-
-SUBROUTINE h5dread_f(dset_id, mem_type_id, buf, dims, hdferr, & 
-                     mem_space_id, file_space_id, xfer_prp)
-
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
-  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
-  TYPE, INTENT(INOUT) :: buf                ! Data buffer; may be a scalar 
-                                            ! or an array
-  DIMENSION(*), INTEGER(HSIZE_T), INTENT(IN)  :: dims 
-                                            ! Array to hold corresponding 
-                                            ! dimension sizes of data 
-                                            ! buffer buf 
-                                            ! dim(k) has value of the k-th 
-                                            ! dimension of buffer buf
-                                            ! Values are ignored if buf is 
-                                            ! a scalar
-  INTEGER, INTENT(OUT) :: hdferr            ! Error code 
-                                            ! 0 on success and -1 on failure
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id 
-                                            ! Memory dataspace identifier 
-                                            ! Default value is H5S_ALL_F 
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id 
-                                            ! File dataspace identifier 
-                                            ! Default value is H5S_ALL_F
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp 
-                                            ! Transfer property list identifier
-                                            ! Default value is H5P_DEFAULT_F             
-END SUBROUTINE h5dread_f
-	
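A minimal C sketch reading an entire two-dimensional integer dataset with H5S_ALL for both selections and default transfer properties; the file name, dataset name, and 4 x 6 extent are assumptions:

#include <hdf5.h>

int main(void)
{
    int buf[4][6];   /* must match the dataset's current extent (assumed 4 x 6) */

    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    hid_t dset = H5Dopen(file, "dset");

    /* Read the whole dataset: both selections default to "all",
     * and data is converted to native int during the read. */
    herr_t status = H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
                            H5P_DEFAULT, buf);

    H5Dclose(dset);
    H5Fclose(file);
    return (status < 0) ? 1 : 0;
}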
- - -
- - - -
-
-
Name: H5Dvlen_get_buf_size -
Signature: -
herr_t H5Dvlen_get_buf_size(hid_t dataset_id, - hid_t type_id, - hid_t space_id, - hsize_t *size - ) -
Purpose: -
Determines the number of bytes required to store VL data. -
Description: -
H5Dvlen_get_buf_size determines the number of bytes - required to store the VL data from the dataset, using the - space_id for the selection in the dataset on - disk and the type_id for the memory representation - of the VL data in memory. -

- *size is returned with the number of bytes - required to store the VL data in memory. -

Parameters: -
    - - - - - - - - - - - - -
    hid_t dataset_id    IN: Identifier of the dataset to query.
    hid_t type_idIN: Datatype identifier.
    hid_t space_idIN: Dataspace identifier.
    hsize_t *sizeOUT: The size in bytes of the memory - buffer required to store the VL data.
-
Returns: -
Returns non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dvlen_get_max_len_f -
There is no direct FORTRAN counterpart for the C function - H5Dvlen_get_buf_size; - corresponding functionality is provided by the FORTRAN function - h5dvlen_get_max_len_f. -
-
-SUBROUTINE h5dvlen_get_max_len_f(dset_id, type_id, space_id, elem_len, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier  
-  INTEGER(HID_T), INTENT(IN) :: type_id     ! Datatype identifier  
-  INTEGER(HID_T), INTENT(IN) :: space_id    ! Dataspace identifier  
-            
-  INTEGER(SIZE_T), INTENT(OUT)  :: elem_len ! Maximum length of the element
-  INTEGER, INTENT(OUT) :: hdferr            ! Error code  
-                                            ! 0 on success and -1 on failure
-END SUBROUTINE h5dvlen_get_max_len_f
-	
- - -
- - - -
-
-
Name: H5Dvlen_reclaim -
Signature: -
herr_t H5Dvlen_reclaim(hid_t type_id, - hid_t space_id, - hid_t plist_id, - void *buf - ) -
Purpose: -
Reclaims VL datatype memory buffers. -
Description: -
H5Dvlen_reclaim reclaims memory buffers created to - store VL datatypes. -

- The type_id must be the datatype stored in the buffer. - The space_id describes the selection for the memory buffer - to free the VL datatypes within. - The plist_id is the dataset transfer property list which - was used for the I/O transfer to create the buffer. - Finally, buf is the pointer to the buffer to be reclaimed. -

- The VL structures (hvl_t) in the user's buffer are - modified to zero out the VL information after the memory has been reclaimed. -

- If nested VL datatypes were used to create the buffer, - this routine frees them from the bottom up, releasing all - the memory without creating memory leaks. -

Parameters: -
    - - - - - - - - - - - - -
    hid_t type_idIN: Identifier of the datatype.
    hid_t space_id    IN: Identifier of the dataspace.
    hid_t plist_idIN: Identifier of the property list used to create the buffer.
    void *bufIN: Pointer to the buffer to be reclaimed.
-
Returns: -
Returns non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - -
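A C sketch combining H5Dvlen_get_buf_size and H5Dvlen_reclaim: read all variable-length records and then release the element buffers the library allocated during the read. The dataset is assumed to hold variable-length sequences of native ints, and error checking is abbreviated:

#include <hdf5.h>
#include <stdlib.h>

/* Read all VL records from 'dset' and reclaim the element buffers. */
static void read_vl(hid_t dset)
{
    hid_t    space = H5Dget_space(dset);
    hid_t    vtype = H5Tvlen_create(H5T_NATIVE_INT);  /* memory representation */
    hssize_t n     = H5Sget_simple_extent_npoints(space);
    hsize_t  need  = 0;
    hvl_t   *buf;

    /* Optional: ask how much memory the VL data itself will occupy. */
    H5Dvlen_get_buf_size(dset, vtype, space, &need);

    buf = (hvl_t *)malloc((size_t)n * sizeof(hvl_t));
    H5Dread(dset, vtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

    /* ... use buf[i].len and buf[i].p here ... */

    /* Free the VL buffers allocated by the library during the read. */
    H5Dvlen_reclaim(vtype, space, H5P_DEFAULT, buf);
    free(buf);
    H5Tclose(vtype);
    H5Sclose(space);
}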
- - - -
-
-
Name: H5Dwrite -
Signature: -
herr_t H5Dwrite(hid_t dataset_id, - hid_t mem_type_id, - hid_t mem_space_id, - hid_t file_space_id, - hid_t xfer_plist_id, - const void * buf - ) -
Purpose: -
Writes raw data from a buffer to a dataset. -
Description: -
H5Dwrite writes a (partial) dataset, specified by its - identifier dataset_id, from the - application memory buffer buf into the file. - Data transfer properties are defined by the argument - xfer_plist_id. - The memory datatype of the (partial) dataset is identified by - the identifier mem_type_id. - The part of the dataset to write is defined by - mem_space_id and file_space_id. -

- file_space_id is used to specify only the selection within - the file dataset's dataspace. Any dataspace specified in file_space_id - is ignored by the library and the dataset's dataspace is always used. - file_space_id can be the constant H5S_ALL, - which indicates that the entire file dataspace, as defined by the - current dimensions of the dataset, is to be selected. -

- mem_space_id is used to specify both the memory dataspace - and the selection within that dataspace. - mem_space_id can be the constant H5S_ALL, - in which case the file dataspace is used for the memory dataspace and - the selection defined with file_space_id is used for the - selection within that dataspace. -

- The behavior of the library for the various combinations of valid - dataspace IDs and H5S_ALL for the mem_space_id and the - file_space_id parameters is described below: - -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- mem_space_id   - - file_space_id   - - Behavior -
- valid dataspace identifier - - valid dataspace identifier - - mem_space_id specifies the memory dataspace and the - selection within it. - file_space_id specifies the selection within the file - dataset's dataspace. -
- H5S_ALL - - valid dataspace identifier - - The file dataset's dataspace is used for the memory dataspace and the - selection specified with file_space_id specifies the - selection within it. - The combination of the file dataset's dataspace and the selection from - file_space_id is used for memory also. -
- valid dataspace identifier - - H5S_ALL - - mem_space_id specifies the memory dataspace and the - selection within it. - The selection within the file dataset's dataspace is set to the "all" - selection. -
- H5S_ALL - - H5S_ALL - - The file dataset's dataspace is used for the memory dataspace and the - selection within the memory dataspace is set to the "all" selection. - The selection within the file dataset's dataspace is set to the "all" - selection. -
- -

- Setting an "all" selection indicates that the entire dataspace, as - defined by the current dimensions of a dataspace, will be selected. - The number of elements selected in the memory dataspace must match the - number of elements selected in the file dataspace. -

- xfer_plist_id can be the constant H5P_DEFAULT, - in which case the default data transfer properties are used. -

- Writing to a dataset will fail if the HDF5 file was - not opened with write access permissions. -

- Data is automatically converted from the memory datatype - and dataspace to the file datatype and dataspace - at the time of the write. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

- If the dataset's space allocation time is set to - H5D_ALLOC_TIME_LATE or H5D_ALLOC_TIME_INCR - and the space for the dataset has not yet been allocated, - that space is allocated when the first raw data is written to the - dataset. - Unused space in the dataset will be written with fill values at the - same time if the dataset's fill time is set to - H5D_FILL_TIME_IFSET or H5D_FILL_TIME_ALLOC. - (Also see - H5Pset_fill_time - and - H5Pset_alloc_time.) -

- If a dataset's storage layout is 'compact', care must be taken when - writing data to the dataset in parallel. A compact dataset's raw data - is cached in memory and may be flushed to the file from any of the - parallel processes, so parallel applications should always attempt to - write identical data to the dataset from all processes. - -

Parameters: -
    - - - - - - - - - - - - - - - - - - -
    hid_t dataset_idIN: Identifier of the dataset to write to.
    hid_t mem_type_idIN: Identifier of the memory datatype.
    hid_t mem_space_idIN: Identifier of the memory dataspace.
    hid_t file_space_id    IN: Identifier of the dataset's dataspace in the file.
    hid_t xfer_plist_idIN: Identifier of a transfer property list - for this I/O operation.
    const void * bufIN: Buffer with data to be written to the file.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5dwrite_f - -
-
-SUBROUTINE h5dwrite_f(dset_id, mem_type_id, buf, dims, hdferr, & 
-                      mem_space_id, file_space_id, xfer_prp)
-
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
-  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
-  TYPE, INTENT(IN) :: buf                    ! Data buffer; may be a scalar 
-                                             ! or an array
- -
-  DIMENSION(*), INTEGER(HSIZE_T), INTENT(IN)  :: dims 
-                                             ! Array to hold corresponding 
-                                             ! dimension sizes of data 
-                                             ! buffer buf; dim(k) has value 
-                                             ! of the k-th dimension of 
-                                             ! buffer buf; values are 
-                                             ! ignored if buf is a scalar
-  INTEGER, INTENT(OUT) :: hdferr             ! Error code 
-                                             ! 0 on success and -1 on failure
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id 
-                                             ! Memory dataspace identifier 
-                                             ! Default value is H5S_ALL_F
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id 
-                                             ! File dataspace identifier 
-                                             ! Default value is H5S_ALL_F
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp 
-                                             ! Transfer property list 
-                                             ! identifier; default value 
-                                             ! is H5P_DEFAULT_F 
-            
-END SUBROUTINE h5dwrite_f
-	
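A C sketch creating a small dataset and writing an entire integer buffer to it with both selections defaulting to "all". The file and dataset names are hypothetical, and the H5Screate_simple and five-argument H5Dcreate calls follow the forms used elsewhere in this Reference Manual:

#include <hdf5.h>

int main(void)
{
    int     data[4][6];
    hsize_t dims[2] = {4, 6};
    int     i, j;

    for (i = 0; i < 4; i++)
        for (j = 0; j < 6; j++)
            data[i][j] = i * 6 + j;

    hid_t file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(2, dims, NULL);
    hid_t dset  = H5Dcreate(file, "dset", H5T_NATIVE_INT, space, H5P_DEFAULT);

    /* Write the whole buffer; memory and file selections are both "all",
     * and data is converted to the file datatype during the write. */
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

    H5Dclose(dset);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}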
- - - -
- -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - diff --git a/doc/html/RM_H5E.html b/doc/html/RM_H5E.html deleted file mode 100644 index 3678b18..0000000 --- a/doc/html/RM_H5E.html +++ /dev/null @@ -1,1689 +0,0 @@ - - -HDF5/H5E API Specification - - - - - - - - - - - - - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   - -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-

H5E: Error Interface

-
- -

Error API Functions

- -These functions provide error handling capabilities in the HDF5 environment. - -

-The C Interfaces: - - - -
- -       - -       - -
-
- -Alphabetical Listing - - - - - - - - - - - - - -
- -        - -        - -
- -
-The FORTRAN90 Interfaces: -
-In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
- - - -
- -       - -       - -
- -

-The Error interface provides error handling in the form of a stack. -The FUNC_ENTER() macro clears the error stack whenever -an interface function is entered. -When an error is detected, an entry is pushed onto the stack. -As the functions unwind, additional entries are pushed onto the stack. -The API function will return some indication that an error occurred and -the application can print the error stack. -

-Certain API functions in the H5E package, such as H5Eprint, -do not clear the error stack. Otherwise, any function which -does not have an underscore immediately after the package name -will clear the error stack. For instance, H5Fopen -clears the error stack while H5F_open does not. -

-An error stack has a fixed maximum size. -If this size is exceeded then the stack will be truncated and only the -inner-most functions will have entries on the stack. -This is expected to be a rare condition. -

-Each thread has its own error stack, but since -multi-threading has not been added to the library yet, this -package maintains a single error stack. The error stack is -statically allocated to reduce the complexity of handling -errors within the H5E package. - - - - - -
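The usual application-level pattern implied above is to check an API return value and, on failure, print the automatically maintained error stack. A minimal C sketch, using the single-argument H5Eprint form documented later in this section; the file name is hypothetical:

#include <hdf5.h>
#include <stdio.h>

int main(void)
{
    /* A failing call (the file is assumed not to exist) pushes
     * entries onto the error stack as the library unwinds. */
    hid_t file = H5Fopen("does_not_exist.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    if (file < 0) {
        fprintf(stderr, "open failed; error stack follows:\n");
        H5Eprint(stderr);    /* print the stack for this thread */
        return 1;
    }
    H5Fclose(file);
    return 0;
}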


-
-
Name: H5Eauto_is_stack -
Signature: -
herr_t H5Eauto_is_stack(hid_t - estack_id, unsigned *is_stack) -
Purpose: -
Determines type of error stack. -
Description: -
H5Eauto_is_stack determines whether the error auto - reporting function for an error stack conforms to the - H5E_auto_stack_t typedef or the - H5E_auto_t typedef. -

- The is_stack parameter is set to 1 - if the error stack conforms to H5E_auto_stack_t - and to 0 if it conforms to H5E_auto_t. -

Parameters: -
    - - - - - - -
    hid_t estack_id    IN: Error stack identifier.
    unsigned *is_stack    OUT: A flag indicating which error stack typedef - the specified error stack conforms to.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
- - - - -
-
-
Name: H5Eclear -
Signature (deprecated): -
herr_t H5Eclear(void) -
Purpose: -
Clears the error stack for the current thread. -
Description: -
H5Eclear clears the error stack for the current thread. -

- The stack is also cleared whenever an API function is called, - with certain exceptions (for instance, H5Eprint). -

- H5Eclear can fail if there are problems initializing - the library. -

- Note: - As of HDF5 Release 1.8, H5Eclear_stack - replaces H5Eclear and H5Eclear is designated - a deprecated function. H5Eclear may be removed - from the library at a future release. -

Parameters: -
    - -
    None
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5eclear_f -
-
-SUBROUTINE h5eclear_f(hdferr) 
-  IMPLICIT NONE
-  INTEGER, INTENT(OUT) :: hdferr  ! Error code
-
-END SUBROUTINE h5eclear_f
-	
- - -
- - - - -
-
-
Name: H5Eclear_stack -
Signature: -
herr_t H5Eclear_stack(hid_t estack_id) -
Purpose: -
Clears the error stack for the current thread. -
Description: -
H5Eclear_stack clears the error stack specified - by estack_id for the current thread. -

- If the value of estack_id is H5E_DEFAULT, - the current error stack will be cleared. -

- The current error stack is also cleared whenever an API function - is called, with certain exceptions - (for instance, H5Eprint). -

- H5Eclear_stack can fail if there are problems initializing - the library. -

Parameters: -
    - - - -
    hid_t estack_id    IN: Error stack identifier.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - -
- - - - -
-
-
Name: H5Eclose_msg -
Signature: -
herr_t H5Eclose_msg(hid_t - mesg_id) -
Purpose: -
Closes an error message identifier. -
Description: -
H5Eclose_msg closes an error message identifier, - which can be for either a major or a minor message. -
Parameters: -
    - - - -
    hid_t mesg_id    IN: Error message identifier.
-
Returns: -
Returns a non-negative value on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - -
-
-
Name: H5Eclose_stack -
Signature: -
herr_t H5Eclose_stack(hid_t - estack_id) -
Purpose: -
Closes object handle for error stack. -
Description: -
H5Eclose_stack closes the object handle for an - error stack and releases its resources. H5E_DEFAULT - cannot be closed. -
Parameters: -
    - - - -
    hid_t estack_id    IN: Error stack identifier.
-
Returns: -
Returns a non-negative value on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - -
-
-
Name: H5Ecreate_msg -
Signature: -
hid_t H5Ecreate_msg(hid_t - class, H5E_type_t msg_type, - const char* mesg) -
Purpose: -
Adds an error message to an error class. -
Description: -
H5Ecreate_msg adds an error message to an error class - defined by a client library or application program. The error message - can be either major or minor, as indicated - by the msg_type parameter. -
Parameters: -
    - - - - - - - - - -
    hid_t class    IN: Error class identifier.
    H5E_type_t msg_type    IN: The type of the error message. -
    - Valid values are H5E_MAJOR and - H5E_MINOR.
    const char *mesg    IN: Error message.
-
Returns: -
Returns a message identifier on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - - -
-
-
Name: H5Eget_auto -
Signature (deprecated): -
herr_t H5Eget_auto(H5E_auto_t * func, - void **client_data - ) -
Purpose: -
Returns the current settings for the automatic error stack - traversal function and its data. -
Description: -
H5Eget_auto returns the current settings for the - automatic error stack traversal function, func, - and its data, client_data. Either (or both) - arguments may be null in which case the value is not returned. -

- Note: - As of HDF5 Release 1.8, H5Eget_auto_stack - replaces H5Eget_auto and H5Eget_auto is designated - a deprecated function. H5Eget_auto may be removed - from the library at a future release. -

Parameters: -
    - - - - - - -
    H5E_auto_t * funcOUT: Current setting for the function to be called upon an - error condition.
    void **client_data    OUT: Current setting for the data passed to the error function.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - - -
-
-
Name: H5Eget_auto_stack -
Signature: -
herr_t H5Eget_auto_stack( - hid_t estack_id, - H5E_auto_stack_t * func, - void **client_data - ) -
Purpose: -
Returns the current settings for the automatic error stack - traversal function and its data. -
Description: -
H5Eget_auto_stack returns the current settings for the - automatic error stack traversal function, func, - and its data, client_data, that are associated with - the error stack specified by estack_id. -

- Either or both of the func and client_data - arguments may be null, in which case the value is not returned. -

Parameters: -
    - - - - - - - - - -
    hid_t estack_id -     IN: Error stack identifier. - H5E_DEFAULT indicates the current stack.
    H5E_auto_stack_t * funcOUT: The function currently set to be - called upon an error condition.
    void **client_data    OUT: Data currently set to be passed - to the error function.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - - -
-
-
Name: H5Eget_class_name -
Signature: -
ssize_t H5Eget_class_name(hid_t - class_id, char* name, - size_t size) -
Purpose: -
Retrieves error class name. -
Description: -
H5Eget_class_name retrieves the name of the error class - specified by the class identifier. - If a non-NULL pointer is passed in for name and - size is greater than zero, the class name, - up to size characters, is returned. The length of the error - class name is also returned. - If NULL is passed in as name, only the length of the - class name is returned. A returned length of zero means the class has no name. - The user is responsible for allocating a buffer large enough for the name. -
Parameters: -
    - - - - - - - - - -
    hid_t class_id    IN: Error class identifier.
    char *name    OUT: Buffer in which the error class name is returned.
    size_t size    IN: The length of class name to be returned - by this function.
-
Returns: -
Returns a non-negative value on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - -
-
-
Name: H5Eget_current_stack -
Signature: -
hid_t H5Eget_current_stack(void) -
Purpose: -
Registers the current error stack. -
Description: -
H5Eget_current_stack registers the current error stack, - returns an object identifier, and clears the current error stack. - An empty error stack will also be assigned an identifier. -
Parameters: -
    - -
    None.
-
Returns: -
Returns the identifier of the current error stack on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - -
-
-
Name: H5Eget_major -
Signature (deprecated): -
const char * H5Eget_major(H5E_major_t n) -
Purpose: -
Returns a character string describing an error specified by a - major error number. -
Description: -
Given a major error number, H5Eget_major returns a - constant character string that describes the error. -

- Note: - As of HDF5 Release 1.8, H5Eget_msg - replaces H5Eget_major and H5Eget_major is designated - a deprecated function. H5Eget_major may be removed - from the library at a future release. -

Parameters: -
    - - - -
    H5E_major_t n    IN: Major error number.
-
Returns: -
Returns a character string describing the error if successful. - Otherwise returns "Invalid major error number." -
Fortran90 Interface: h5eget_major_f -
-
-SUBROUTINE h5eget_major_f(error_no, name, hdferr)
-  INTEGER, INTENT(IN) :: error_no         ! Major error number
-  CHARACTER(LEN=*), INTENT(OUT) :: name   ! Error message describing the error
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code
-
-END SUBROUTINE h5eget_major_f
-	
- - -
- - - -
-
-
Name: H5Eget_minor -
Signature (deprecated): -
const char * H5Eget_minor(H5E_minor_t n) -
Purpose: -
Returns a character string describing an error specified by a - minor error number. -
Description: -
Given a minor error number, H5Eget_minor returns a - constant character string that describes the error. -

- Note: - As of HDF5 Release 1.8, H5Eget_msg - replaces H5Eget_minor and H5Eget_minor is designated - a deprecated function. H5Eget_minor may be removed - from the library at a future release. -

Parameters: -
    - - - -
    H5E_minor_t n    IN: Minor error number.
-
Returns: -
Returns a character string describing the error if successful. - Otherwise returns "Invalid minor error number." -
Fortran90 Interface: h5eget_minor_f -
-
-SUBROUTINE h5eget_minor_f(error_no, name, hdferr)
-  INTEGER, INTENT(IN) :: error_no         ! Minor error number
-  CHARACTER(LEN=*), INTENT(OUT) :: name   ! Error message describing the error
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code
-
-END SUBROUTINE h5eget_minor_f
-	
- - -
- - - -
-
-
Name: H5Eget_msg -
Signature: -
ssize_t H5Eget_msg(hid_t - mesg_id, H5E_type_t* mesg_type, - char* mesg, size_t size) -
Purpose: -
Retrieves an error message. -
Description: -
H5Eget_msg retrieves the error message, including its - length and type. The error message is specified by mesg_id. - The user is responsible for passing in a buffer large enough for the message. - If mesg is not NULL and size is greater than zero, - the error message, up to size characters, is returned. The length of the - message is also returned. If NULL is passed in as mesg, only the - length and type of the message are returned. If the return value is zero, - the message has no content. -
Parameters: -
    - - - - - - - - - - - - -
    hid_t mesg_id    IN: Identifier for the error message to be queried.
    H5E_type_t *mesg_type    OUT: The type of the error message. -
    - Valid values are H5E_MAJOR and - H5E_MINOR.
    char *mesg    OUT: Error message buffer.
    size_t size    IN: The length of error message to be returned - by this function.
-
Returns: -
Returns the size of the error message in bytes on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - -
-
-
Name: H5Eget_num -
Signature: -
ssize_t H5Eget_num(hid_t estack_id) -
Purpose: -
Retrieves the number of error messages in an error stack. -
Description: -
H5Eget_num retrieves the number of error records - in the error stack specified by estack_id - (including major, minor messages and description). -
Parameters: -
    - - - -
    hid_t estack_id -     IN: Error stack identifier.
-
Returns: -
Returns a non-negative value on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - -
-
-
Name: H5Epop -
Signature: -
herr_t H5Epop(hid_t - estack_id, size_t count) -
Purpose: -
Deletes specified number of error messages from the error stack. -
Description: -
H5Epop deletes the number of error records specified - in count from the top of the error stack - specified by estack_id - (including major, minor messages and description). - The number of error messages to be deleted is specified by count. -
Parameters: -
    - - - - - - -
    hid_t estack_id -     IN: Error stack identifier.
    size_t countIN: The number of error messages to be deleted - from the top of error stack.
-
Returns: -
Returns a non-negative value on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - - -
-
-
Name: H5Eprint -
Signature (deprecated): -
herr_t H5Eprint(FILE * stream) -
Purpose: -
Prints the error stack in a default manner. -
Description: -
H5Eprint prints the error stack on the specified - stream, stream. - Even if the error stack is empty, a one-line message will be printed: -
     - HDF5-DIAG: Error detected in thread 0. -

- H5Eprint is a convenience function for - H5Ewalk with a function that prints error messages. - Users are encouraged to write their own more specific error handlers. -

- Note: - As of HDF5 Release 1.8, H5Eprint_stack - replaces H5Eprint and H5Eprint is designated - a deprecated function. H5Eprint may be removed - from the library at a future release. -

Parameters: -
    - - - -
    FILE * stream    IN: File pointer, or stderr if NULL.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5eprint_f -
-
-SUBROUTINE h5eprint_f(hdferr, name)
-  CHARACTER(LEN=*), OPTIONAL, INTENT(IN) :: name ! File name
-  INTEGER, INTENT(OUT) :: hdferr                 ! Error code
-
-END SUBROUTINE h5eprint_f
-	
- - -
- - - - -
-
-
Name: H5Eprint_stack -
Signature: -
herr_t H5Eprint_stack( - hid_t estack_id, - FILE * stream) -
Purpose: -
Prints the error stack in a default manner. -
Description: -
H5Eprint_stack prints the error stack specified by - estack_id on the specified stream, stream. - Even if the error stack is empty, a one-line message of the - following form will be printed: -
     - HDF5-DIAG: Error detected in HDF5 library version: 1.5.62 - thread 0. -

- A similar line will appear before the error messages of each - error class stating the library name, library version number, and - thread identifier. -

- If estack_id is H5E_DEFAULT, - the current error stack will be printed. -

- H5Eprint_stack is a convenience function for - H5Ewalk_stack with a function that prints error messages. - Users are encouraged to write their own more specific error handlers. -

Parameters: -
    - - - - - - -
    hid_t estack_idIN: Identifier of the error stack to be printed. - If the identifier is H5E_DEFAULT, - the current error stack will be printed.
    FILE * stream    IN: File pointer, or stderr if NULL.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - -
- - - - -
-
-
Name: H5Epush -
Signature (deprecated): -
herr_t H5Epush( - const char *file, - const char *func, - unsigned line, - H5E_major_t maj_num, - H5E_minor_t min_num, - const char *str - ) -
Purpose: -
Pushes new error record onto error stack. -
Description: -
H5Epush pushes a new error record onto the - error stack for the current thread. -

- The error has major and minor numbers maj_num and - min_num, - the function func where the error was detected, - the name of the file file where the error was detected, - the line line within that file, - and an error description string str. -

- The function name, filename, and error description strings - must be statically allocated. -

- Note: - As of HDF5 Release 1.8, H5Epush_stack - replaces H5Epush and H5Epush is designated - a deprecated function. H5Epush may be removed - from the library at a future release. -

Parameters, H5Epush: -
    - - - - - - - - - - - - - - - - - - -
    const char *fileIN: Name of the file in which the error - was detected.
    const char *funcIN: Name of the function in which the error - was detected.
    unsigned lineIN: Line within the file at which the error - was detected.
    H5E_major_t maj_num    IN: Major error number.
    H5E_minor_t min_numIN: Minor error number.
    const char *strIN: Error description string.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - -
-
-
Name: H5Epush_stack -
Signature: -
herr_t H5Epush_stack( - hid_t estack_id, - const char *file, - const char *func, - unsigned line, - hid_t class_id, - hid_t major_id, - hid_t minor_id, - const char *msg, - ...) -
Purpose: -
Pushes new error record onto error stack. -
Description: -
H5Epush_stack pushes a new error record onto the - error stack for the current thread. -

- The error record contains - the error class identifier class_id, - the major and minor message identifiers major_id and - minor_id, - the function name func where the error was detected, - the filename file and line number line - within that file where the error was detected, and - an error description msg. -

- The major and minor errors must be in the same error class. -

- The function name, filename, and error description strings - must be statically allocated. -

- msg can be a format control string with - additional arguments. This design of appending additional arguments - is similar to the system and C functions printf and - fprintf. -

Parameters: -
    - - - - - - - - - - - - - - - - - - - - - - - - -
    hid_t estack_idIN: Identifier of the error stack to which - the error record is to be pushed. - If the identifier is H5E_DEFAULT, the error record - will be pushed to the current stack.
    const char *fileIN: Name of the file in which the error was - detected.
    const char *funcIN: Name of the function in which the error was - detected.
    unsigned lineIN: Line number within the file at which the - error was detected.
    hid_t class_idIN: Error class identifier.
    hid_t major_id    IN: Major error identifier.
    hid_t minor_idIN: Minor error identifier.
    const char *msgIN: Error description string.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - -
-
-
Name: H5Eregister_class -
Signature: -
hid_t H5Eregister_class(const char* - cls_name, const char* lib_name, - const char* version) -
Purpose: -
Registers a client library or application program with the HDF5 error API. -
Description: -
H5Eregister_class registers a client library or - application program with the HDF5 error API so that the client library - or application program can report errors together with the HDF5 library. - It receives an identifier for this error class for further error - operations. The library name and version number will - be printed in the error message as a preamble. -
Parameters: -
    - - - - - - - - - -
    const char *cls_name    IN: Name of the error class.
    const char* lib_nameIN: Name of the client library or application - to which the error class belongs.
    const char* versionIN: Version of the client library or application - to which the error class belongs. - A NULL can be passed in.
-
Returns: -
Returns a class identifier on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
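A C sketch of the registration-and-push sequence: register an application error class, create one major and one minor message with H5Ecreate_msg, and push a record onto the default stack with the H5Epush_stack signature documented in this section. The class name, version string, and message texts are all hypothetical:

#include <hdf5.h>

/* Report an application-defined error through the HDF5 error stack. */
static void report_app_error(void)
{
    hid_t cls = H5Eregister_class("MyApp", "myapp", "1.0");
    hid_t maj = H5Ecreate_msg(cls, H5E_MAJOR, "Configuration error");
    hid_t min = H5Ecreate_msg(cls, H5E_MINOR, "Missing parameter");

    /* The description is a printf-style format string with arguments. */
    H5Epush_stack(H5E_DEFAULT, __FILE__, "report_app_error", __LINE__,
                  cls, maj, min, "parameter '%s' was not supplied", "size");

    /* cls, maj, and min would normally be kept for the lifetime of the
     * application and released at shutdown with H5Eclose_msg and
     * H5Eunregister_class. */
}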
- - - - -
-
-
Name: H5Eset_auto -
Signature (deprecated): -
herr_t H5Eset_auto(H5E_auto_t func, - void *client_data - ) -
Purpose: -
Turns automatic error printing on or off. -
Description: -
H5Eset_auto turns on or off automatic printing of - errors. When turned on (non-null func pointer), - any API function which returns an error indication will - first call func, passing it client_data - as an argument. -

- When the library is first initialized the auto printing function - is set to H5Eprint (cast appropriately) and - client_data is the standard error stream pointer, - stderr. -

- Automatic stack traversal is always in the - H5E_WALK_DOWNWARD direction. -

- Note: - As of HDF5 Release 1.8, H5Eset_auto_stack - replaces H5Eset_auto and H5Eset_auto is designated - a deprecated function. H5Eset_auto may be removed - from the library at a future release. -

Parameters: -
    - - - - - - -
    H5E_auto_t funcIN: Function to be called upon an error condition.
    void *client_data    IN: Data passed to the error function.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5eset_auto_f -
-
-SUBROUTINE h5eset_auto_f(printflag, hdferr)
-  INTEGER, INTENT(IN) :: printflag  !flag to turn automatic error
-                                    !printing on or off
-                                    !possible values are:
-                                    !printon (1)
-                                    !printoff(0)
-  INTEGER, INTENT(OUT) :: hdferr    ! Error code
-
-END SUBROUTINE h5eset_auto_f
-	
- - -
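A common pattern built on these calls is to disable automatic error printing around a call that is expected to fail and then restore the previous settings. A C sketch using the H5Eget_auto/H5Eset_auto forms documented here; the probing logic is an example only:

#include <hdf5.h>

/* Try to open a file without an expected failure printing to stderr. */
static int quiet_probe(const char *name)
{
    H5E_auto_t old_func;
    void      *old_data;
    hid_t      file;

    /* Save the current handler, then turn automatic printing off. */
    H5Eget_auto(&old_func, &old_data);
    H5Eset_auto(NULL, NULL);

    file = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);

    /* Restore the previous automatic error handling. */
    H5Eset_auto(old_func, old_data);

    if (file < 0)
        return 0;
    H5Fclose(file);
    return 1;
}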
- - - - - -
-
-
Name: H5Eset_auto_stack -
Signature: -
herr_t H5Eset_auto_stack( - hid_t estack_id, - H5E_auto_stack_t func, - void *client_data - ) -
Purpose: -
Turns automatic error printing on or off. -
Description: -
H5Eset_auto_stack turns on or off automatic printing of - errors for the error stack specified with estack_id. - An estack_id value of H5E_DEFAULT - indicates the current stack. -

- When automatic printing is turned on, - by the use of a non-null func pointer, - any API function which returns an error indication will - first call func, passing it client_data - as an argument. -

- When the library is first initialized, the auto printing function - is set to H5Eprint_stack (cast appropriately) and - client_data is the standard error stream pointer, - stderr. -

- Automatic stack traversal is always in the - H5E_WALK_DOWNWARD direction. -

- Automatic error printing is turned off with a - H5Eset_auto_stack call with a NULL - func pointer. -

Parameters: -
    - - - - - - - - - -
    hid_t estack_id    IN: Error stack identifier.
    H5E_auto_stack_t funcIN: Function to be called upon an error - condition.
    void *client_data    IN: Data passed to the error function.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - -
- - - - -
-
-
Name: H5Eset_current_stack -
Signature: -
herr_t H5Eset_current_stack(hid_t - estack_id) -
Purpose: -
Replaces the current error stack. -
Description: -
H5Eset_current_stack replaces the content of - the current error stack with a copy of the content of error stack - specified by estack_id. -
Parameters: -
    - - - -
    hid_t estack_id    IN: Error stack identifier.
-
Returns: -
Returns a non-negative value on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - -
-
-
Name: H5Eunregister_class -
Signature: -
herr_t H5Eunregister_class(hid_t - class_id) -
Purpose: -
Removes an error class. -
Description: -
H5Eunregister_class removes the error class specified - by class_id. - All the major and minor errors in this class will also be closed. -
Parameters: -
    - - - -
    hid_t class_id    IN: Error class identifier.
-
Returns: -
Returns a non-negative value on success; - otherwise returns a negative value. -
Fortran90 Interface: -
None. -
- - - - -
-
-
Name: H5Ewalk -
Signature (deprecated): -
herr_t H5Ewalk(H5E_direction_t direction, - H5E_walk_t func, - void * client_data - ) -
Purpose: -
Walks the error stack for the current thread, calling a specified - function. -
Description: -
H5Ewalk walks the error stack for the current thread - and calls the specified function for each error along the way. -

- direction determines whether the stack is walked - from the inside out or the outside in. - A value of H5E_WALK_UPWARD means begin with the - most specific error and end at the API; - a value of H5E_WALK_DOWNWARD means to start at the - API and end at the inner-most function where the error was first - detected. -

- func will be called for each error in the error stack. - Its arguments will include an index number (beginning at zero - regardless of stack traversal direction), an error stack entry, - and the client_data pointer passed to - H5E_print. - The H5E_walk_t prototype is as follows: -

-

- typedef herr_t (*H5E_walk_t)(int n, - H5E_error_t *err_desc, - void *client_data) -

- where the parameters have the following meanings: -

-
int n -
Indexed position of the error in the stack. -
H5E_error_t *err_desc -
Pointer to a data structure describing the error. - (This structure is currently described only in the - source code file hdf5/src/H5Epublic.h. - That file also contains the definitive list of major - and minor error codes. That information will - eventually be presented as an appendix to this - Reference Manual.) -
void *client_data -
Pointer to client data in the format expected by - the user-defined function. -
-
-

- H5Ewalk can fail if there are problems initializing - the library. -

- Note: - As of HDF5 Release 1.8, H5Ewalk_stack - replaces H5Ewalk and H5Ewalk is designated - a deprecated function. H5Ewalk may be removed - from the library at a future release. -

Parameters: -
    - - - - - - - - - -
    H5E_direction_t direction    IN: Direction in which the error stack is to be walked.
    H5E_walk_t funcIN: Function to be called for each error encountered.
    void * client_dataIN: Data to be passed with func.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
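A C sketch of a walker conforming to the H5E_walk_t prototype quoted above; the H5E_error_t field names used here (func_name, line, desc) are assumed from the header file mentioned in this entry:

#include <hdf5.h>
#include <stdio.h>

/* Print one line per error record; client_data is a FILE pointer. */
static herr_t my_walker(int n, H5E_error_t *err, void *client_data)
{
    FILE *out = (FILE *)client_data;
    fprintf(out, "  #%d  %s (line %u): %s\n",
            n, err->func_name, err->line, err->desc);
    return 0;   /* keep walking */
}

/* Walk the current error stack from the API call inward. */
static void dump_errors(void)
{
    H5Ewalk(H5E_WALK_DOWNWARD, my_walker, stderr);
}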
- - - - -
-
-
Name: H5Ewalk_stack -
Signature: -
herr_t H5Ewalk_stack( - hid_t estack_id, - H5E_direction_t direction, - H5E_walk_t func, - void * client_data - ) -
Purpose: -
Walks the error stack for the current thread, calling a specified - function. -
Description: -
H5Ewalk_stack walks the error stack specified by - estack_id for the current thread and calls the function - specified in func for each error along the way. -

- If the value of estack_id is H5E_DEFAULT, - then H5Ewalk_stack walks the current error stack. -

- direction specifies whether the stack is walked - from the inside out or the outside in. - A value of H5E_WALK_UPWARD means to begin with the - most specific error and end at the API; - a value of H5E_WALK_DOWNWARD means to start at the - API and end at the innermost function where the error was first - detected. -

- func, a function compliant with the - H5E_walk_t prototype, will be called for each error - in the error stack. - Its arguments will include an index number n - (beginning at zero regardless of stack traversal direction), - an error stack entry err_desc, - and the client_data pointer passed to - H5E_print. - The H5E_walk_t prototype is as follows: -

-

- typedef herr_t (*H5E_walk_t)(int n, - H5E_error_t *err_desc, - void *client_data) -

- where the parameters have the following meanings: -

-
int n -
Indexed position of the error in the stack. -
H5E_error_t *err_desc -
Pointer to a data structure describing the error. - (This structure is currently described only in the source - code file hdf5/src/H5Epublic.h. - That file also contains the definitive list of major - and minor error codes; that information will - eventually be presented as an appendix to this - HDF5 Reference Manual.) -
void *client_data -
Pointer to client data in the format expected by - the user-defined function. -
-
-

- H5Ewalk_stack can fail if there are problems initializing - the library. -

Parameters: -
    - - - - - - - - - - - - -
    hid_t estack_idIN: Error stack identifier.
    H5E_direction_t direction    IN: Direction in which the error stack is - to be walked.
    H5E_walk_t funcIN: Function to be called for each error - encountered.
    void * client_dataIN: Data to be passed with func. -
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - - -
-
-
Name: H5Ewalk_cb -
Signature: -
herr_t H5Ewalk_cb(int n, - H5E_error_t *err_desc, - void *client_data - ) -
Purpose: -
Default error stack traversal callback function - that prints error messages to the specified output stream. -
Description: -
H5Ewalk_cb is a default error stack traversal callback - function that prints error messages to the specified output stream. - It is not meant to be called directly but rather to be passed as an - argument to the H5Ewalk function. - This function is also called by H5Eprint. - Application writers are encouraged to use this function as a - model for their own error stack walking functions. -

- n is a counter for how many times this function - has been called for this particular traversal of the stack. - It always begins at zero for the first error on the stack - (either the top or bottom error, or even both, depending on - the traversal direction and the size of the stack). -

- err_desc is an error description. It contains all the - information about a particular error. -

- client_data is the same pointer that was passed as the - client_data argument of H5Ewalk. - It is expected to be a file pointer (or stderr if NULL). -

Parameters: -
    - - - - - - - - - -
    int nIN/OUT: Number of times this function has been called - for this traversal of the stack.
    H5E_error_t *err_desc    OUT: Error description.
    void *client_dataIN: A file pointer, or stderr if NULL.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - diff --git a/doc/html/RM_H5F.html b/doc/html/RM_H5F.html deleted file mode 100644 index b3cbb33..0000000 --- a/doc/html/RM_H5F.html +++ /dev/null @@ -1,1970 +0,0 @@ - - -HDF5/H5F API Specification - - - - - - - - - - - - - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-

H5F: File Interface

-
- -

File API Functions

- -These functions are designed to provide file-level access to HDF5 files. -Further manipulation of objects inside a file is performed through one of APIs -documented below. - -

-The C Interfaces: - - - -
- -       - -       - -
-
- -Alphabetical Listing - - - - - - - - - - - - - -
- -        - -        - -
- -
-The FORTRAN90 Interfaces: -
-In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
- - - -
- -       - -       - -
- - - - - -


-
-
Name: H5Fclose -
Signature: -
herr_t H5Fclose(hid_t file_id - ) -
Purpose: -
Terminates access to an HDF5 file. -
Description: -
H5Fclose terminates access to an HDF5 file - by flushing all data to storage and terminating access - to the file through file_id. -

- If this is the last file identifier open for the file - and no other access identifier is open (e.g., a dataset - identifier, group identifier, or shared datatype identifier), - the file will be fully closed and access will end. -

- Delayed close: -
- Note the following deviation from the above-described behavior. - If H5Fclose is called for a file but one or more - objects within the file remain open, those objects will remain - accessible until they are individually closed. - Thus, if the dataset data_sample is open when - H5Fclose is called for the file containing it, - data_sample will remain open and accessible - (including writable) until it is explicitly closed. - The file will be automatically closed once all objects in the - file have been closed. -

- Be warned, however, that there are circumstances where it is - not possible to delay closing a file. - For example, an MPI-IO file close is a collective call; all of - the processes that opened the file must close it collectively. - The file cannot be closed at some time in the future by each - process in an independent fashion. - Another example is that an application using an AFS token-based - file access privilege may destroy its AFS token after - H5Fclose has returned successfully. - This would make any future access to the file, or any object - within it, illegal. -

- In such situations, applications must close all open objects - in a file before calling H5Fclose. - It is generally recommended to do so in all cases. -

Parameters: -
    - - - -
    hid_t file_id    IN: Identifier of a file to terminate access to.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fclose_f -
-
-SUBROUTINE h5fclose_f(file_id, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: file_id ! File identifier
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code
-                                        ! 0 on success and -1 on failure
-END SUBROUTINE h5fclose_f 
-    
- - -
- - - -
-
-
Name: H5Fcreate -
Signature: -
hid_t H5Fcreate(const char *name, - unsigned flags, - hid_t create_id, - hid_t access_id - ) -
Purpose: -
Creates HDF5 files. -
Description: -
H5Fcreate is the primary function for creating - HDF5 files. -

- The flags parameter determines whether an - existing file will be overwritten. All newly created files - are opened for both reading and writing. All flags may be - combined with the bit-wise OR operator (`|') to change - the behavior of the H5Fcreate call. -

- The more complex behaviors of file creation and access - are controlled through the file-creation and file-access - property lists. The value of H5P_DEFAULT for - a property list value indicates that the library should use - the default values for the appropriate property list. -

- The return value is a file identifier for the newly-created file; - this file identifier should be closed by calling - H5Fclose when it is no longer needed. -

- - Special case -- File creation in the case of an - already-open file: -
- If a file being created is already opened, by either a - previous H5Fopen or H5Fcreate call, - the HDF5 library may or may not detect that the open file and - the new file are the same physical file. - (See H5Fopen regarding - the limitations in detecting the re-opening of an already-open - file.) -

- If the library detects that the file is already opened, - H5Fcreate will return a failure, regardless - of the use of H5F_ACC_TRUNC. -

- If the library does not detect that the file is already opened - and H5F_ACC_TRUNC is not used, - H5Fcreate will return a failure because the file - already exists. Note that this is correct behavior. -

- But if the library does not detect that the file is already - opened and H5F_ACC_TRUNC is used, - H5Fcreate will truncate the existing file - and return a valid file identifier. - Such a truncation of a currently-opened file will almost - certainly result in errors. - While unlikely, the HDF5 library may not be able to detect, - and thus report, such errors. -

- Applications should avoid calling H5Fcreate - with an already opened file. - -
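Example (non-normative): a minimal C sketch of a typical call, creating (or truncating) a file with default property lists. The file name new.h5 is hypothetical.

#include "hdf5.h"

int main(void)
{
    /* Truncate any existing file of the same name; use H5F_ACC_EXCL
     * instead to fail if the file already exists. */
    hid_t file_id = H5Fcreate("new.h5", H5F_ACC_TRUNC,
                              H5P_DEFAULT, H5P_DEFAULT);
    if (file_id < 0)
        return 1;                 /* creation failed */

    H5Fclose(file_id);
    return 0;
}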

Parameters: -
    - - - - - - - - - - - - -
    const char *name    IN: Name of the file to access.
    unsigned flags    IN: File access flags. Allowable values are: -
      -
      H5F_ACC_TRUNC -
      Truncate file, if it already exists, - erasing all data previously stored in the file. -
      H5F_ACC_EXCL -
      Fail if file already exists. -
    -
  • H5F_ACC_TRUNC and H5F_ACC_EXCL - are mutually exclusive; use exactly one. -
  • An additional flag, H5F_ACC_DEBUG, prints - debug information. This flag is used only by HDF5 library - developers; it is neither tested nor supported - for use in applications.
  • hid_t create_id    IN: File creation property list identifier, used when modifying - default file meta-data. - Use H5P_DEFAULT for default file creation properties.
    hid_t access_idIN: File access property list identifier. - If parallel file access is desired, this is a collective - call according to the communicator stored in the - access_id. - Use H5P_DEFAULT for default file access properties.
-
Returns: -
Returns a file identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fcreate_f -
-
-SUBROUTINE h5fcreate_f(name, access_flags, file_id, hdferr, &  
-                       creation_prp, access_prp)
-  IMPLICIT NONE 
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the file
-  INTEGER, INTENT(IN) :: access_flags    ! File access flags 
-                                         ! Possible values are:
-                                         !     H5F_ACC_RDWR_F   
-                                         !     H5F_ACC_RDONLY_F   
-                                         !     H5F_ACC_TRUNC_F  
-                                         !     H5F_ACC_EXCL_F    
-                                         !     H5F_ACC_DEBUG_F   
-  INTEGER(HID_T), INTENT(OUT) :: file_id ! File identifier 
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: creation_prp 
-                                         ! File creation property 
-                                         ! list identifier, if not 
-                                         ! specified its value is
-                                         ! H5P_DEFAULT_F  
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: access_prp  
-                                         ! File access property list 
-                                         ! identifier, if not 
-                                         ! specified its value is
-                                         ! H5P_DEFAULT_F  
-END SUBROUTINE h5fcreate_f
-    
- - -
- - - -
-
-
Name: H5Fflush -
Signature: -
herr_t H5Fflush(hid_t object_id, - H5F_scope_t scope - ) -
Purpose: -
Flushes all buffers associated with a file to disk. -
Description: -
H5Fflush causes all buffers associated with a - file to be immediately flushed to disk without removing the - data from the cache. -

- object_id can be any object associated with the file, - including the file itself, a dataset, a group, an attribute, or - a named datatype. -

- scope specifies whether the scope of the flushing - action is global or local. Valid values are -

- - - - - - - -
H5F_SCOPE_GLOBAL    Flushes the entire virtual file.
H5F_SCOPE_LOCALFlushes only the specified file.
-
-
Note: -
HDF5 does not possess full control over buffering. - H5Fflush flushes the internal HDF5 buffers then - asks the operating system (the OS) to flush the system buffers for the - open files. After that, the OS is responsible for ensuring that - the data is actually flushed to disk. -
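Example (non-normative): a small C helper, hedged as a sketch, that flushes everything associated with the file containing an arbitrary open object identifier.

#include "hdf5.h"

/* Flush all buffers for the file that contains obj_id.
 * Returns 0 on success, -1 on failure. */
static int flush_all(hid_t obj_id)
{
    /* H5F_SCOPE_GLOBAL flushes the entire virtual file;
     * H5F_SCOPE_LOCAL would flush only the specified file. */
    return (H5Fflush(obj_id, H5F_SCOPE_GLOBAL) < 0) ? -1 : 0;
}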
Parameters: -
    - - - - - - -
    hid_t object_idIN: Identifier of object used to identify the file.
    H5F_scope_t scope    IN: Specifies the scope of the flushing action.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fflush_f -
-
-SUBROUTINE h5fflush_f(obj_id, scope, hdferr)
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)  :: obj_id      ! Object identifier
-  INTEGER, INTENT(IN)         :: scope       ! Flag with two possible values:
-                                             !     H5F_SCOPE_GLOBAL_F  
-                                             !     H5F_SCOPE_LOCAL_F  
-  INTEGER, INTENT(OUT)        :: hdferr      ! Error code 
-                                             ! 0 on success and -1 on failure
-END SUBROUTINE h5fflush_f
-    
- - -
- - - -
-
-
Name: H5Fget_access_plist -
Signature: -
hid_t H5Fget_access_plist(hid_t file_id) -
Purpose: -
Returns a file access property list identifier. -
Description: -
H5Fget_access_plist returns the - file access property list identifier of the specified file. -

- See "File Access Properties" in - H5P: Property List Interface - in this reference manual and - "File Access Property Lists" - in Files in the - HDF5 User's Guide for - additional information and related functions. -

Parameters: -
    - - - -
    hid_t file_id    IN: Identifier of file to get access property list of
-
Returns: -
Returns a file access property list identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fget_access_plist_f -
-
-SUBROUTINE h5fget_access_plist_f(file_id, fapl_id, hdferr)
-
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)   :: file_id ! File identifier
-  INTEGER(HID_T), INTENT(OUT)  :: fapl_id ! File access property list identifier
-  INTEGER, INTENT(OUT)         :: hdferr  ! Error code 
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5fget_access_plist_f
-    
- - -
- - - -
-
-
Name: H5Fget_create_plist -
Signature: -
hid_t H5Fget_create_plist(hid_t file_id - ) -
Purpose: -
Returns a file creation property list identifier. -
Description: -
H5Fget_create_plist returns a file creation - property list identifier identifying the creation properties - used to create this file. This function is useful for - duplicating properties when creating another file. -

- See "File Creation Properties" in - H5P: Property List Interface - in this reference manual and - "File Creation Properties" - in Files in the - HDF5 User's Guide for - additional information and related functions. -

Parameters: -
    -
    -
    - -
    hid_t file_id    IN: Identifier of the file to get creation property list of
-
Returns: -
Returns a file creation property list identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fget_create_plist_f -
-
-SUBROUTINE h5fget_create_plist_f(file_id, fcpl_id, hdferr)
-
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)   :: file_id ! File identifier
-  INTEGER(HID_T), INTENT(OUT)  :: fcpl_id ! File creation property list 
-                                          ! identifier
-  INTEGER, INTENT(OUT)         :: hdferr  ! Error code 
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5fget_create_plist_f
-    
- - -
- - - - -
-
-
Name: H5Fget_filesize -
Signature: -
herr_t H5Fget_filesize(hid_t file_id, - hsize_t *size - ) -
Purpose: -
Returns the size of an HDF5 file. -
Description: -
H5Fget_filesize returns the size - of the HDF5 file specified by file_id. -

- The returned size is that of the entire file, - as opposed to only the HDF5 portion of the file. - I.e., size includes the user block, if any, - the HDF5 portion of the file, and - any data that may have been appended - beyond the data written through the HDF5 Library. -
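Example (non-normative): a minimal C sketch that reports the total file size. The file name example.h5 is hypothetical.

#include "hdf5.h"
#include <stdio.h>

int main(void)
{
    hid_t   file_id = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    hsize_t size    = 0;

    if (H5Fget_filesize(file_id, &size) >= 0)
        printf("File size: %llu bytes\n", (unsigned long long)size);

    H5Fclose(file_id);
    return 0;
}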

Parameters: -
-
hid_t file_id -
IN: Identifier of a currently-open HDF5 file -
hsize_t *size -
OUT: Size of the file, in bytes. -
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fget_filesize_f -
-
-SUBROUTINE h5fget_filesize_f(file_id, size, hdferr)
-
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: file_id    ! file identifier
-  INTEGER(HSIZE_T), INTENT(OUT) :: size    ! Size of the file 
-  INTEGER, INTENT(OUT) :: hdferr           ! Error code: 0 on success,
-                                           ! -1 if fail
-END SUBROUTINE h5fget_filesize_f 
-    
- - -
- - - - -
-
-
Name: H5Fget_freespace -
Signature: -
hssize_t H5Fget_freespace(hid_t file_id) -
Purpose: -
Returns the amount of free space in a file. -
Description: -
Given the identifier of an open file, file_id, - H5Fget_freespace returns the amount of space that is - unused by any objects in the file. -

- Currently, the HDF5 library only tracks free space in a file from a - file open or create until that file is closed, so this routine will - only report the free space that has been created during that - interval. -
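Example (non-normative): a C sketch that reports the free space tracked for an already-open file identifier.

#include "hdf5.h"
#include <stdio.h>

/* Report the unused space currently tracked for an open file. */
static void report_free_space(hid_t file_id)
{
    hssize_t free_space = H5Fget_freespace(file_id);

    if (free_space >= 0)
        printf("Unused space in file: %lld bytes\n", (long long)free_space);
}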

Parameters: -
    - - - -
    hid_t file_id    IN: Identifier of a currently-open HDF5 file
-
Returns: -
Returns the amount of free space in the file if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fget_freespace_f -
-
-SUBROUTINE h5fget_freespace_f(file_id, free_space, hdferr)
-
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)  :: file_id       ! File identifier
-  INTEGER(HSSIZE_T), INTENT(OUT) :: free_space ! Amount of free space in file
-  INTEGER, INTENT(OUT)        :: hdferr        ! Error code 
-                                               ! 0 on success and -1 on failure
-END SUBROUTINE h5fget_freespace_f 
-    
- - -
- - - -
-
-
Name: H5Fget_mdc_config -
Signature: -
herr_t H5Fget_mdc_config(hid_t - file_id, H5AC_cache_config_t *config_ptr) -
Purpose: -
Obtains the current metadata cache configuration for the target file. -
Description: -
H5Fget_mdc_config loads the current metadata cache - configuration into the instance of H5AC_cache_config_t - pointed to by the config_ptr parameter. - -

Note that the version field of *config_ptr must - be initialized --this allows the library to support old versions - of the H5AC_cache_config_t structure. - -

See the overview of the metadata cache in the special topics section - of the user manual for details on metadata cache configuration. - If you haven't read and understood that documentation, the results - of this call will not make much sense. -
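Example (non-normative): a C sketch showing the one requirement called out above -- the version field must be set before the call.

#include "hdf5.h"

/* Query the current metadata cache configuration.
 * Returns 0 on success, -1 on failure. */
static int query_mdc_config(hid_t file_id, H5AC_cache_config_t *config)
{
    /* The version field must be initialized before the call. */
    config->version = H5AC__CURR_CACHE_CONFIG_VERSION;
    return (H5Fget_mdc_config(file_id, config) < 0) ? -1 : 0;
}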

Parameters: -
    hid_t file_id    IN: Identifier of the target file
    H5AC_cache_config_t *config_ptr    IN/OUT: Pointer to the instance of H5AC_cache_config_t - in which the current metadata cache configuration is to be reported. - The fields of this structure are discussed below:
     
    General configuration section:
    int version    IN: Integer field indicating the version - of the H5AC_cache_config_t in use. This field should be - set to H5AC__CURR_CACHE_CONFIG_VERSION - (defined in H5ACpublic.h).
    hbool_t rpt_fcn_enabled    OUT: Boolean flag indicating whether the adaptive - cache resize report function is enabled. This field should almost - always be set to FALSE. Since resize algorithm activity is reported - via stdout, it MUST be set to FALSE on Windows machines. -

    The report function is not supported code, and can be - expected to change between versions of the library. - Use it at your own risk.

    hbool_t set_initial_size    OUT: Boolean flag indicating whether the cache - should be created with a user specified initial maximum size. -

    If the configuration is loaded from the cache, - this flag will always be FALSE.

    size_t initial_size    OUT: Initial maximum size of the cache in bytes, - if applicable. -

    If the configuration is loaded from the cache, this - field will contain the cache maximum size as of the - time of the call.

    double min_clean_fraction    OUT: This field is only used in the parallel - version of the library. It specifies the minimum fraction - of the cache that must be kept either clean or - empty when possible.
    size_t max_size    OUT: Upper bound (in bytes) on the range of - values that the adaptive cache resize code can select as - the maximum cache size.
    size_t min_size    OUT: Lower bound (in bytes) on the range - of values that the adaptive cache resize code can - select as the maximum cache size.
    long int epoch_length    OUT: Number of cache accesses between runs - of the adaptive cache resize code.
     
    Increment configuration section:
    enum H5C_cache_incr_mode incr_mode    OUT: Enumerated value indicating the operational - mode of the automatic cache size increase code. At present, - only the following values are legal: -

    H5C_incr__off: Automatic cache size increase is disabled. -

    H5C_incr__threshold: Automatic cache size increase is - enabled using the hit rate threshold algorithm.

    double lower_hr_threshold    OUT: Hit rate threshold used in the hit rate - threshold cache size increase algorithm.
    double increment    OUT: The factor by which the current maximum - cache size is multiplied to obtain an initial new maximum cache - size if a size increase is triggered in the hit rate - threshold cache size increase algorithm.
    hbool_t apply_max_increment    OUT: Boolean flag indicating whether an upper - limit will be applied to the size of cache size increases.
    size_t max_increment    OUT: The maximum number of bytes by which the - maximum cache size can be increased in a single step -- if - applicable.
     
    Decrement configuration section:
    enum H5C_cache_decr_mode decr_mode    OUT: Enumerated value indicating the operational - mode of the automatic cache size decrease code. At present, - the following values are legal: -

    H5C_decr__off: Automatic cache size decrease is disabled, - and the remaining decrement fields are ignored. -

    H5C_decr__threshold: Automatic cache size decrease is - enabled using the hit rate threshold algorithm. -

    H5C_decr__age_out: Automatic cache size decrease is enabled - using the ageout algorithm. -

    H5C_decr__age_out_with_threshold: Automatic cache size - decrease is enabled using the ageout with hit rate - threshold algorithm

    double upper_hr_threshold    OUT: Upper hit rate threshold. This value is only - used if the decr_mode is either H5C_decr__threshold or - H5C_decr__age_out_with_threshold.
    double decrement    OUT: Factor by which the current max cache size - is multiplied to obtain an initial value for the new cache - size when cache size reduction is triggered in the hit rate - threshold cache size reduction algorithm.
    hbool_t apply_max_decrement    OUT: Boolean flag indicating whether an upper - limit should be applied to the size of cache size - decreases.
    size_t max_decrement    OUT: The maximum number of bytes by which cache - size can be decreased in any single step, if applicable.
    int epochs_before_eviction    OUT: The minimum number of epochs that an entry - must reside unaccessed in cache before being evicted under - either of the ageout cache size reduction algorithms.
    hbool_t apply_empty_reserve    OUT: Boolean flag indicating whether an empty - reserve should be maintained under either of the ageout - cache size reduction algorithms.
    double empty_reserve    OUT: Empty reserve for use with the ageout - cache size reduction algorithms, if applicable.
-
Returns: -
Returns a non-negative value if successful; otherwise returns a negative value. - -
- - - -
-
-
Name: H5Fget_mdc_hit_rate -
Signature: -
herr_t H5Fget_mdc_hit_rate(hid_t - file_id, double *hit_rate_ptr) -
Purpose: -
Obtains the target file's metadata cache hit rate. -
Description: -
H5Fget_mdc_hit_rate queries the metadata cache of the target - file to obtain its hit rate (cache hits / (cache hits + cache misses)) - since the last time hit rate statistics were reset. If the cache has - not been accessed since the last time the hit rate stats were reset, - the hit rate is defined to be 0.0. -

The hit rate stats can be reset either manually (via - H5Freset_mdc_hit_rate_stats()), or automatically. If the cache's - adaptive resize code is enabled, the hit rate stats will be reset - once per epoch. If they are reset manually as well, - the cache may behave oddly. -

See the overview of the metadata cache in the special - topics section of the user manual for details on the metadata - cache and its adaptive resize algorithms. -
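Example (non-normative): a C sketch that prints the hit rate accumulated since the last reset.

#include "hdf5.h"
#include <stdio.h>

/* Print the metadata cache hit rate since the last reset. */
static void print_hit_rate(hid_t file_id)
{
    double hit_rate = 0.0;

    if (H5Fget_mdc_hit_rate(file_id, &hit_rate) >= 0)
        printf("Metadata cache hit rate: %.4f\n", hit_rate);
}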

Parameters: -
    - - - - - - -
    hid_t file_id -     IN: Identifier of the target file.
    double * hit_rate_ptr -     OUT: Pointer to the double in which the - hit rate is returned. Note that *hit_rate_ptr is - undefined if the API call fails.
-
Returns: -
Returns a non-negative value if successful; otherwise - returns a negative value. - -
- - - -
-
-
Name: H5Fget_mdc_size -
Signature: -
herr_t H5Fget_mdc_size(hid_t file_id, - size_t *max_size_ptr, - size_t *min_clean_size_ptr, - size_t *cur_size_ptr, - int *cur_num_entries_ptr) -
Purpose: -
Obtains current metadata cache size data for the specified file. -
Description: -
H5Fget_mdc_size queries the metadata cache of the target file - for the desired size information, and returns this information in - the locations indicated by the pointer parameters. If any pointer - parameter is NULL, the associated data is not returned. -

If the API call fails, the values returned via the pointer - parameters are undefined. -

If adaptive cache resizing is enabled, the cache maximum size - and minimum clean size may change at the end of each epoch. Current - size and current number of entries can change on each cache access. -

Current size can exceed maximum size under certain conditions. - See the overview of the metadata cache in the special topics - section of the user manual for a discussion of this. -

Parameters: -
    - - - - - - - - - - - - - - - -
    hid_t file_id -     IN: Identifier of the target file.
    size_t *max_size_ptr -     OUT: Pointer to the location in which the - current cache maximum size is to be returned, or NULL if - this datum is not desired.
    size_t *min_clean_size_ptr -     OUT: Pointer to the location in which the - current cache minimum clean size is to be returned, or - NULL if that datum is not desired.
    size_t *cur_size_ptr -     OUT: Pointer to the location in which the - current cache size is to be returned, or NULL if that - datum is not desired.
    int *cur_num_entries_ptr -     OUT: Pointer to the location in which the - current number of entries in the cache is to be returned, - or NULL if that datum is not desired.
-
Returns: -
Returns a non-negative value if successful; otherwise returns a - negative value. - -
- - - -
-
-
Name: H5Fget_name -
Signature: -
ssize_t H5Fget_name(hid_t obj_id, - char *name, - size_t size - ) - -
Purpose: -
Retrieves name of file to which object belongs. -
Description: -
H5Fget_name retrieves the name of the file - to which the object obj_id belongs. - The object can be a group, dataset, attribute, or - named datatype. -

- Up to size characters of the filename - are returned in name; - additional characters, if any, are not returned to - the user application. -

- If the length of the name, - which determines the required value of size, - is unknown, a preliminary H5Fget_name call - can be made by setting name to NULL. - The return value of this call will be the size of the filename; - that value can then be assigned to size - for a second H5Fget_name call, - which will retrieve the actual name. -

- If an error occurs, the buffer pointed to by - name is unchanged and - the function returns a negative value. -
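Example (non-normative): a C sketch of the two-call pattern described above -- query the length, allocate, then retrieve the name.

#include "hdf5.h"
#include <stdio.h>
#include <stdlib.h>

/* Print the name of the file to which obj_id belongs. */
static void print_file_name(hid_t obj_id)
{
    ssize_t len = H5Fget_name(obj_id, NULL, 0);   /* length query */
    char   *name;

    if (len < 0)
        return;

    name = (char *)malloc((size_t)len + 1);       /* room for '\0' */
    if (name == NULL)
        return;

    H5Fget_name(obj_id, name, (size_t)len + 1);
    printf("File: %s\n", name);
    free(name);
}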

Parameters: -
-
hid_t obj_id -
IN: Identifier of the object for which the - associated filename is sought. - The object can be a group, dataset, attribute, or - named datatype. -
char *name -
OUT: Buffer to contain the returned filename. -
size_t size -
IN: Size, in bytes, of the name buffer. -
-
Returns: -
Returns the length of the filename if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fget_name_f -
-
-SUBROUTINE h5fget_name_f(obj_id, buf, size, hdferr)
-
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id     ! Object identifier 
-  CHARACTER(LEN=*), INTENT(INOUT) :: buf   ! Buffer to hold filename
-  INTEGER(SIZE_T), INTENT(OUT) :: size     ! Size of the filename
-  INTEGER, INTENT(OUT) :: hdferr           ! Error code: 0 on success,
-                                           ! -1 if fail
-END SUBROUTINE h5fget_name_f
-    
- - -
- - - - -
-
-
Name: H5Fget_obj_count -
Signature: -
int H5Fget_obj_count(hid_t file_id, - unsigned int types - ) -
Purpose: -
Returns the number of open object identifiers for an open file. -
Description: -
Given the identifier of an open file, file_id, - and the desired object types, types, - H5Fget_obj_count returns the number of - open object identifiers for the file. -

- To retrieve a count of open identifiers for open objects in - all HDF5 application files that are currently open, - pass the value H5F_OBJ_ALL in file_id. -

- The types of objects to be counted are specified - in types as follows: -

- - -
- H5F_OBJ_FILE - - Files only -
- H5F_OBJ_DATASET - - Datasets only -
- H5F_OBJ_GROUP - - Groups only -
- H5F_OBJ_DATATYPE   - - Named datatypes only -
- H5F_OBJ_ATTR   - - Attributes only -
- H5F_OBJ_ALL - - All of the above -
- (I.e., H5F_OBJ_FILE | H5F_OBJ_DATASET | - H5F_OBJ_GROUP | H5F_OBJ_DATATYPE - | H5F_OBJ_ATTR ) -
-
- Multiple object types can be combined with the - logical OR operator (|). - For example, the expression (H5F_OBJ_DATASET|H5F_OBJ_GROUP) would call for - datasets and groups. -
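Example (non-normative): a C sketch counting the open dataset and group identifiers in a file, mirroring the documented signature.

#include "hdf5.h"

/* Count the open dataset and group identifiers for file_id. */
static int count_open_dsets_and_groups(hid_t file_id)
{
    return H5Fget_obj_count(file_id, H5F_OBJ_DATASET | H5F_OBJ_GROUP);
}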
Parameters: -
    - - - - - - -
    hid_t file_idIN: Identifier of a currently-open HDF5 file or - H5F_OBJ_ALL for all currently-open HDF5 files.
    unsigned int types    IN: Type of object for which identifiers are to be returned.
-
Returns: -
Returns the number of open objects if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fget_obj_count_f -
-
-SUBROUTINE h5fget_obj_count_f(file_id, obj_type, obj_count, hdferr)
-
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)  :: file_id   ! File identifier
-  INTEGER, INTENT(IN)         :: obj_type  ! Object types, possible values are:
-                                           !     H5F_OBJ_FILE_F
-                                           !     H5F_OBJ_GROUP_F
-                                           !     H5F_OBJ_DATASET_F
-                                           !     H5F_OBJ_DATATYPE_F
-                                           !     H5F_OBJ_ALL_F
-  INTEGER, INTENT(OUT)        :: obj_count ! Number of opened objects
-  INTEGER, INTENT(OUT)        :: hdferr    ! Error code 
-                                           ! 0 on success and -1 on failure
-END SUBROUTINE h5fget_obj_count_f
-    
- - -
- - - -
-
-
Name: H5Fget_obj_ids -
Signature: -
int H5Fget_obj_ids(hid_t file_id, - unsigned int types, - int max_objs, - hid_t *obj_id_list - ) -
Purpose: -
Returns a list of open object identifiers. -
Description: -
Given the file identifier file_id and - the type of objects to be identified, types, - H5Fget_obj_ids returns the list of identifiers - for all open HDF5 objects fitting the specified criteria. -

- To retrieve identifiers for open objects in all HDF5 application - files that are currently open, pass the value - H5F_OBJ_ALL in file_id. -

- The types of object identifiers to be retrieved are specified - in types using the codes listed for the same - parameter in H5Fget_obj_count. -

- To retrieve identifiers for all open objects, pass a negative value - for the max_objs. -
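Example (non-normative): a C sketch that counts the open objects in a file and then retrieves their identifiers; the caller frees the returned array.

#include "hdf5.h"
#include <stdlib.h>

/* Return a newly allocated array of all open object identifiers in
 * file_id; the number retrieved is stored in *count_out. */
static hid_t *get_open_objects(hid_t file_id, int *count_out)
{
    int    count = H5Fget_obj_count(file_id, H5F_OBJ_ALL);
    hid_t *ids;

    if (count <= 0)
        return NULL;

    ids = (hid_t *)malloc((size_t)count * sizeof(hid_t));
    if (ids != NULL)
        *count_out = H5Fget_obj_ids(file_id, H5F_OBJ_ALL, count, ids);

    return ids;
}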

Parameters: -
    - - - - - - - - - - - - -
    hid_t file_idIN: Identifier of a currently-open HDF5 file or - H5F_OBJ_ALL for all currently-open HDF5 files.
    unsigned int typesIN: Type of object for which identifiers are to be returned.
    int max_objsIN: Maximum number of object identifiers to place into - obj_id_list.
    hid_t *obj_id_list    OUT: Pointer to the returned list of open object identifiers.
-
Returns: -
Returns number of objects placed into obj_id_list if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fget_obj_ids_f -
-
-SUBROUTINE h5fget_obj_ids_f(file_id, obj_type, max_objs, obj_ids, hdferr)
-
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)   :: file_id  ! File identifier
-  INTEGER,        INTENT(IN)   :: obj_type ! Object types, possible values are:
-                                           !     H5F_OBJ_FILE_F
-                                           !     H5F_OBJ_GROUP_F
-                                           !     H5F_OBJ_DATASET_F
-                                           !     H5F_OBJ_DATATYPE_F
-                                           !     H5F_OBJ_ALL_F
-  INTEGER, INTENT(IN)          :: max_objs ! Maximum number of object 
-                                           ! identifiers to retrieve
-  INTEGER(HID_T), DIMENSION(*), INTENT(OUT) :: obj_ids
-                                           ! Array of requested object 
-                                           ! identifiers
-  INTEGER, INTENT(OUT)        :: hdferr    ! Error code 
-                                           ! 0 on success and -1 on failure
-END SUBROUTINE h5fget_obj_ids_f
-    
- - -
- - - -
-
-
Name: H5Fget_vfd_handle -
Signature: -
herr_t H5Fget_vfd_handle(hid_t file_id, - hid_t fapl_id, - void *file_handle - ) -
Purpose: -
Returns pointer to the file handle from the virtual file driver. -
Description: -
Given the file identifier file_id and - the file access property list fapl_id, - H5Fget_vfd_handle returns a pointer to the file handle - from the low-level file driver currently being used by the - HDF5 library for file I/O. -
Notes: -
Applications should not modify the file through this file handle. -

- This file handle is dynamic and is valid only while the file remains - open; it will be invalid if the file is closed and reopened or - opened during a subsequent session. -
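Example (non-normative): a C sketch that retrieves the low-level handle by passing the address of a void pointer. It assumes the default (SEC2) driver, for which the handle is expected to point to a POSIX file descriptor; other drivers return driver-specific handles.

#include "hdf5.h"
#include <stdio.h>

/* Print the underlying POSIX file descriptor (default driver assumed). */
static void print_posix_fd(hid_t file_id)
{
    void *handle = NULL;

    if (H5Fget_vfd_handle(file_id, H5P_DEFAULT, &handle) >= 0 && handle)
        printf("Underlying file descriptor: %d\n", *(int *)handle);
}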

Parameters: -
    - - - - - - - - - -
    hid_t file_idIN: Identifier of the file to be queried.
    hid_t fapl_idIN: File access property list identifier. - For most drivers, the value will be H5P_DEFAULT. - For the FAMILY or MULTI drivers, - this value should be defined through the property list - functions: - H5Pset_family_offset for the FAMILY - driver and H5Pset_multi_type for the - MULTI driver.
    void *file_handle    OUT: Pointer to the file handle being used by - the low-level virtual file driver.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - -
-
-
Name: H5Fis_hdf5 -
Signature: -
htri_t H5Fis_hdf5(const char *name - ) -
Purpose: -
Determines whether a file is in the HDF5 format. -
Description: -
H5Fis_hdf5 determines whether a file is in - the HDF5 format. -
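Example (non-normative): a minimal C sketch distinguishing the three possible outcomes. The file name sample.h5 is hypothetical.

#include "hdf5.h"
#include <stdio.h>

int main(void)
{
    htri_t status = H5Fis_hdf5("sample.h5");

    if (status > 0)
        printf("sample.h5 is an HDF5 file\n");
    else if (status == 0)
        printf("sample.h5 is not an HDF5 file\n");
    else
        printf("H5Fis_hdf5 failed (the file may not exist)\n");

    return 0;
}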
Parameters: -
    - - - -
    const char *name    IN: File name to check format.
-
Returns: -
When successful, returns a positive value, for TRUE, - or 0 (zero), for FALSE. - Otherwise returns a negative value. -
Fortran90 Interface: h5fis_hdf5_f -
-
-SUBROUTINE h5fis_hdf5_f(name, status, hdferr)   
-  IMPLICIT NONE 
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the file
-  LOGICAL, INTENT(OUT) :: status         ! This parameter indicates 
-                                         ! whether file is an HDF5 file 
-                                         ! ( TRUE or FALSE ) 
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5fis_hdf5_f
-    
- - -
- - - -
-
-
Name: H5Fmount -
Signature: -
herr_t H5Fmount(hid_t loc_id, - const char *name, - hid_t child_id, - hid_t plist_id - ) -
Purpose: -
Mounts a file. -
Description: -
H5Fmount mounts the file specified by - child_id onto the group specified by - loc_id and name using - the mount properties plist_id. -

- Note that loc_id is either a file or group identifier - and name is relative to loc_id. -
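Example (non-normative): a C sketch that mounts one file on a group of another and later unmounts it. The file names and the mount-point group /mnt are hypothetical; the group must already exist in the parent file.

#include "hdf5.h"

int main(void)
{
    hid_t parent = H5Fopen("parent.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    hid_t child  = H5Fopen("child.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

    /* Mount child.h5 at the existing group /mnt of parent.h5. */
    H5Fmount(parent, "/mnt", child, H5P_DEFAULT);

    /* ... access objects of child.h5 through /mnt/... in the parent ... */

    H5Funmount(parent, "/mnt");
    H5Fclose(child);
    H5Fclose(parent);
    return 0;
}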

Parameters: -
    - - - - - - - - - - - - -
    hid_t loc_idIN: Identifier of the file or group in - which name is defined.
    const char *name    IN: Name of the group onto which the - file specified by child_id - is to be mounted.
    hid_t child_idIN: Identifier of the file to be mounted.
    hid_t plist_idIN: Identifier of the property list to be used.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fmount_f -
-
-SUBROUTINE h5fmount_f(loc_id, name, child_id, hdferr)
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)  :: loc_id      ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN):: name        ! Group name at location loc_id
-  INTEGER(HID_T), INTENT(IN)  :: child_id    ! File(to be mounted) identifier
-  INTEGER, INTENT(OUT)        :: hdferr      ! Error code 
-                                             ! 0 on success and -1 on failure
-END SUBROUTINE h5fmount_f
-    
- - -
- - - -
-
-
Name: H5Fopen -
Signature: -
hid_t H5Fopen(const char *name, - unsigned flags, - hid_t access_id - ) -
Purpose: -
Opens an existing file. -
Description: -
H5Fopen opens an existing file and is the primary - function for accessing existing HDF5 files. -

- The parameter access_id is a file access property - list identifier or H5P_DEFAULT if the - default I/O access parameters are to be used. -

- The flags argument determines whether writing - to an existing file will be allowed. - The file is opened with read and write permission if - flags is set to H5F_ACC_RDWR. - All flags may be combined with the bit-wise OR operator (`|') - to change the behavior of the file open call. - More complex behaviors of file access are controlled - through the file-access property list. -

- The return value is a file identifier for the open file; - this file identifier should be closed by calling - H5Fclose when it is no longer needed. -

- - Special case -- Multiple opens: -
- A file can often be opened with a new H5Fopen - call without closing an already-open identifier established - in a previous H5Fopen or H5Fcreate - call. Each such H5Fopen call will return a - unique identifier and the file can be accessed through any - of these identifiers as long as the identifier remains valid. - In such multiply-opened cases, all the open calls should - use the same flags argument. -

- In some cases, such as files on a local Unix file system, - the HDF5 library can detect that a file is multiply opened and - will maintain coherent access among the file identifiers. -

- But in many other cases, such as parallel file systems or - networked file systems, it is not always possible to detect - multiple opens of the same physical file. - In such cases, HDF5 will treat the file identifiers - as though they are accessing different files and - will be unable to maintain coherent access. - Errors are likely to result in these cases. - While unlikely, the HDF5 library may not be able to detect, - and thus report, such errors. -

- It is generally recommended that applications avoid - multiple opens of the same file. - -
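Example (non-normative): a minimal C sketch opening an existing file read-only with default access properties. The file name example.h5 is hypothetical.

#include "hdf5.h"

int main(void)
{
    /* Use H5F_ACC_RDWR instead to allow writing. */
    hid_t file_id = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

    if (file_id < 0)
        return 1;                 /* open failed */

    H5Fclose(file_id);
    return 0;
}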

Parameters: -
    - - - - - - - - - - -
    const char *name    IN: Name of the file to access.
    unsigned flagsIN: File access flags. Allowable values are: -
      -
      H5F_ACC_RDWR -
      Allow read and write access to file. -
      H5F_ACC_RDONLY -
      Allow read-only access to file. -
      -
    • H5F_ACC_RDWR and H5F_ACC_RDONLY - are mutually exclusive; use exactly one. -
    • An additional flag, H5F_ACC_DEBUG, prints - debug information. This flag is used only by HDF5 library - developers; it is neither tested nor supported - for use in applications. -
    hid_t access_id    IN: Identifier for the file access properties list. - If parallel file access is desired, this is a collective - call according to the communicator stored in the - access_id. - Use H5P_DEFAULT for default file access properties.
-
Returns: -
Returns a file identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5fopen_f -
-
-SUBROUTINE h5fopen_f(name, access_flags, file_id, hdferr, &  
-                     access_prp)
-  IMPLICIT NONE 
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the file
-  INTEGER, INTENT(IN) :: access_flags    ! File access flags  
-                                         ! Possible values are:
-                                         !     H5F_ACC_RDWR_F   
-                                         !     H5F_ACC_RDONLY_F    
-  INTEGER(HID_T), INTENT(OUT) :: file_id ! File identifier 
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: access_prp  
-                                         ! File access property list 
-                                         ! identifier  
-END SUBROUTINE h5fopen_f
-    
- - -
- - - -
-
-
Name: H5Freopen -
Signature: -
hid_t H5Freopen(hid_t file_id - ) -
Purpose: -
Returns a new identifier for a previously-opened HDF5 file. -
Description: -
H5Freopen returns a new file identifier for an - already-open HDF5 file, as specified by file_id. - Both identifiers share caches and other information. - The only difference between the identifiers is that the - new identifier is not mounted anywhere and no files are - mounted on it. -

- Note that there is no circumstance under which - H5Freopen can actually open a closed file; - the file must already be open and have an active - file_id. E.g., one cannot close a file with - H5Fclose (file_id) then use - H5Freopen (file_id) to reopen it. -

- The new file identifier should be closed by calling - H5Fclose when it is no longer needed. -
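Example (non-normative): a C sketch obtaining a second identifier for a file that is already open.

#include "hdf5.h"

/* Return an additional identifier for an already-open file. */
static hid_t get_second_id(hid_t file_id)
{
    hid_t new_id = H5Freopen(file_id);

    /* Both identifiers refer to the same file and share caches;
     * each must eventually be released with H5Fclose. */
    return new_id;
}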

Parameters: -
    - - - -
    hid_t file_id    IN: Identifier of a file for which an additional identifier - is required.
-
Returns: -
Returns a new file identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5freopen_f -
-
-SUBROUTINE h5freopen_f(file_id, new_file_id, hdferr)
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)  :: file_id     ! File identifier 
-  INTEGER(HID_T), INTENT(OUT) :: new_file_id ! New file identifier 
-  INTEGER, INTENT(OUT)        :: hdferr      ! Error code 
-                                             ! 0 on success and -1 on failure
-END SUBROUTINE h5freopen_f 
-    
- - -
- - - -
-
-
Name: H5Freset_mdc_hit_rate_stats -
Signature: -
herr_t H5Freset_mdc_hit_rate_stats(hid_t - file_id) -
Purpose: -
Resets hit rate statistics counters for the target file. -
Description: -
H5Freset_mdc_hit_rate_stats resets the hit rate statistics - counters in the metadata cache associated with the specified - file. -

If the adaptive cache resizing code is enabled, the hit - rate statistics are reset at the beginning of each epoch. - This API call allows you to do the same thing from your program. -

The adaptive cache resizing code may behave oddly if you use - this call when adaptive cache resizing is enabled. However, - the call should be useful if you choose to control metadata - cache size from your program. -

See the overview of the metadata cache in the special topics - section of the user manual for details of the metadata cache and - the adaptive cache resizing algorithms. If you haven't read, - understood, and thought about the material covered in that - documentation, you shouldn't be using this API call. -

Parameters: -
    - - - -
    hid_t file_id    IN: Identifier of the target file.
-
Returns: -
Returns a non-negative value if successful; otherwise returns a - negative value. - -
- - - -
-
-
Name: H5Fset_mdc_config -
Signature: -
herr_t H5Fset_mdc_config(hid_t - file_id, H5AC_cache_config_t *config_ptr) -
Purpose: -
Attempts to configure the metadata cache of the target file. -
Description: -
H5Fset_mdc_config attempts to configure the file's metadata cache - according to the configuration supplied in *config_ptr. -

See the overview of the metadata cache in the special topics - section of the user manual for details on what is being configured. - If you haven't read and understood that documentation, you really - shouldn't be using this API call. -

Parameters: -
    hid_t file_id -     IN: Identifier of the target file
    H5AC_cache_config_t *config_ptr -     IN: Pointer to the instance of H5AC_cache_config_t - containing the desired configuration. The fields of this structure - are discussed below:
     
    General configuration section:
    int version -     IN: Integer field indicating the version of - the H5AC_cache_config_t in use. This field should be set to - H5AC__CURR_CACHE_CONFIG_VERSION (defined in H5ACpublic.h).
    hbool_t rpt_fcn_enabled -     IN: Boolean flag indicating whether the adaptive - cache resize report function is enabled. This field should almost - always be set to FALSE. Since resize algorithm activity is reported - via stdout, it MUST be set to FALSE on Windows machines. -

    The report function is not supported code, and can be expected to - change between versions of the library. Use it at your own risk.

    hbool_t set_initial_size -     IN: Boolean flag indicating whether the cache should be - forced to the user specified initial size.
    size_t initial_size -     IN: If set_initial_size is TRUE, initial_size must - contain the desired initial size in bytes. This value must lie - in the closed interval [min_size, max_size] (see below).
    double min_clean_fraction -     IN: This field is only used in the parallel version - of the library. It specifies the minimum fraction of the cache that - must be kept either clean or empty. -

    The value must lie in the interval [0.0, 1.0]. 0.25 is a good place - to start.

    size_t max_size -     IN: Upper bound (in bytes) on the range of values - that the adaptive cache resize code can select as the maximum - cache size.
    size_t min_size -     IN: Lower bound (in bytes) on the range of values - that the adaptive cache resize code can select as the maximum - cache size.
    long int epoch_length -     IN: Number of cache accesses between runs of the - adaptive cache resize code. 50,000 is a good starting number.
     
    Increment configuration section:
    enum H5C_cache_incr_mode incr_mode -     IN: Enumerated value indicating the operational mode - of the automatic cache size increase code. At present, only two - values are legal: -

    H5C_incr__off: Automatic cache size increase is disabled, - and the remaining increment fields are ignored. -

    H5C_incr__threshold: Automatic cache size increase is enabled - using the hit rate threshold algorithm.

    double lower_hr_threshold -     IN: Hit rate threshold used by the hit rate threshold - cache size increment algorithm. -

    When the hit rate over an epoch is below this threshold and the - cache is full, the maximum size of the cache is multiplied by - increment (below), and then clipped as necessary to stay within - max_size, and possibly max_increment. -

    This field must lie in the interval [0.0, 1.0]. 0.8 or 0.9 - is a good starting point.

    double increment -     IN: Factor by which the hit rate threshold cache - size increment algorithm multiplies the current cache max size - to obtain a tentative new cache size. -

    The actual cache size increase will be clipped to satisfy the - max_size specified in the general configuration, and possibly - max_increment below. -

    The parameter must be greater than or equal to 1.0 -- 2.0 - is a reasonable value. -

    If you set it to 1.0, you will effectively disable cache size - increases.

    hbool_t apply_max_increment -     IN: Boolean flag indicating whether an upper limit - should be applied to the size of cache size increases.
    size_t max_increment -     IN: Maximum number of bytes by which cache size can - be increased in a single step -- if applicable.
     
    Decrement configuration section:
    enum H5C_cache_decr_mode decr_mode -     IN: Enumerated value indicating the operational - mode of the automatic cache size decrease code. At present, - the following values are legal: -

    H5C_decr__off: Automatic cache size decrease is disabled. -

    H5C_decr__threshold: Automatic cache size decrease is - enabled using the hit rate threshold algorithm. -

    H5C_decr__age_out: Automatic cache size decrease is enabled - using the ageout algorithm. -

    H5C_decr__age_out_with_threshold: Automatic cache size - decrease is enabled using the ageout with hit rate threshold - algorithm

    double upper_hr_threshold -     IN: Hit rate threshold for the hit rate threshold and - ageout with hit rate threshold cache size decrement algorithms. -

    When decr_mode is H5C_decr__threshold, and the hit rate over a - given epoch exceeds the supplied threshold, the current maximum - cache size is multiplied by decrement to obtain a tentative new - (and smaller) maximum cache size. -

    When decr_mode is H5C_decr__age_out_with_threshold, there is no - attempt to find and evict aged out entries unless the hit rate in - the previous epoch exceeded the supplied threshold. -

    This field must lie in the interval [0.0, 1.0]. -

    For H5C_incr__threshold, .9995 or .99995 is a good place to start. -

    For H5C_decr__age_out_with_threshold, .999 might be - more useful.

    double decrement -     IN: In the hit rate threshold cache size decrease - algorithm, this parameter contains the factor by which the - current max cache size is multiplied to produce a tentative - new cache size. -

    The actual cache size decrease will be clipped to satisfy the - min_size specified in the general configuration, and possibly - max_decrement below. -

    The parameter must be in the interval [0.0, 1.0]. -

    If you set it to 1.0, you will effectively disable cache size - decreases. 0.9 is a reasonable starting point.

    hbool_t apply_max_decrement -     IN: Boolean flag indicating whether an upper limit - should be applied to the size of cache size decreases.
    size_t max_decrement -     IN: Maximum number of bytes by which the maximum cache - size can be decreased in any single step -- if applicable.
    int epochs_before_eviction -     IN: In the ageout based cache size reduction algorithms, - this field contains the minimum number of epochs an entry must remain - unaccessed in cache before the cache size reduction algorithm tries to - evict it. 3 is a reasonable value.
    hbool_t apply_empty_reserve -     IN: Boolean flag indicating whether the ageout based - decrement algorithms will maintain an empty reserve when decreasing - cache size.
    double empty_reserve -     IN: Empty reserve as a fraction of maximum cache - size if applicable. -

    When so directed, the ageout based algorithms will not decrease - the maximum cache size unless the empty reserve can be met. -

    The parameter must lie in the interval [0.0, 1.0]. - 0.1 or 0.05 is a good place to start.

-
Returns: -
Returns a non-negative value if successful; otherwise returns a - negative value. - -
- - - -
-
-
Name: H5Funmount -
Signature: -
herr_t H5Funmount(hid_t loc_id, - const char *name - ) -
Purpose: -
Unmounts a file. -
Description: -
Given a mount point, H5Funmount - disassociates the mount point's file - from the file mounted there. This function - does not close either file. -

- The mount point can be either the group in the - parent or the root group of the mounted file - (both groups have the same name). If the mount - point was opened before the mount then it is the - group in the parent; if it was opened after the - mount then it is the root group of the child. -

- Note that loc_id is either a file or group identifier - and name is relative to loc_id. -

Parameters: -
    - - - - - - -
    hid_t loc_idIN: File or group identifier for the location at which - the specified file is to be unmounted.
    const char *name    IN: Name of the mount point.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5funmount_f -
-
-SUBROUTINE h5funmount_f(loc_id, name, child_id, hdferr)
-  IMPLICIT NONE 
-  INTEGER(HID_T), INTENT(IN)  :: loc_id      ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN):: name        ! Group name at location loc_id
-  INTEGER, INTENT(OUT)        :: hdferr      ! Error code 
-                                             ! 0 on success and -1 on failure
-END SUBROUTINE h5funmount_f
-    
- - -
- - - - -
-
- - - -
-HDF5 documents and links 
-Introduction to HDF5 
-HDF5 User Guide 
- -
-And in this document, the -HDF5 Reference Manual   -
-H5IM   -H5LT   -H5PT   -H5TB   -
-H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
-H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
-
-
-HDF Help Desk -
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
- - - diff --git a/doc/html/RM_H5Front.html b/doc/html/RM_H5Front.html deleted file mode 100644 index 53e6e1b..0000000 --- a/doc/html/RM_H5Front.html +++ /dev/null @@ -1,409 +0,0 @@ - - - - -HDF5 API Specification - - - - - - - - - - - - - - - - -
-
- - - -
-
-
-

HDF5: API Specification
Reference Manual

- - -
-The HDF5 library provides several interfaces, each of which provides the -tools required to meet specific aspects of the HDF5 data-handling requirements. -

- - - -

- -

- - - - - - -
High-level APIsMain HDF5 Library, -
including Low-level APIs
Fortran and C++ -
Interfaces
-
-
-
- - -
-

High-level HDF5 APIs

- The HDF5 Library includes several sets of convenience and - standard-use APIs. - The HDF5 Lite APIs are convenience functions designed to - facilitate common HDF5 operations. - The HDF5 Image, HDF5 Table and HDF5 Packet Table APIs - implement standardized approaches to common use cases - with the intention of improving interoperability. -
 Lite    The H5LT - API general higher-level functions
 Image The H5IM API for images
 Table The H5TB API for manipulating - table datasets 
  - Packet Table -  The H5PT API for managing packet tables (and C++ H5PT wrappers) -
  - Dimension Scales The H5DS API for managing dimension scales 
    
-

Main HDF5 Library, or Low-level APIs

- The main HDF5 Library includes all of the low-level APIs, - providing user applications with fine-grain control of - HDF5 functionality. -
 Library Functions    The general-purpose - H5 functions.
 Attribute Interface The H5A API for attributes.
 Dataset Interface The H5D API for manipulating - scientific datasets.
 Error Interface The H5E API for error handling.
 File Interface The H5F API for accessing HDF files.
 Group Interface The H5G API for creating physical - groups of objects on disk.
 Identifier Interface The H5I API for working with - object identifiers.
 Property List Interface The H5P API for manipulating - object property lists.
 Reference Interface The H5R API for references.
 Dataspace Interface The H5S API for defining dataset - dataspace.
 Datatype Interface The H5T API for defining dataset - element information.
 Filters and
-   Compression Interface
 The H5Z API for inline data filters - and data compression.
 Tools Interactive tools for the examination - of existing HDF5 files.
 Predefined Datatypes Predefined datatypes in HDF5. - -
-
- -

-A PDF version of this HDF5 Reference Manual will be available -from -http://hdf.ncsa.uiuc.edu/HDF5/doc/PSandPDF/ -approximately one week after each release. - - - - -


- - - -

The Fortran90 and C++ APIs to HDF5

-
- -The HDF5 Library distribution includes FORTRAN90 and C++ APIs, -which are described in the following documents. - -

-Fortran90 API -

- - HDF5 FORTRAN90 User's Notes - contains general information regarding the API. - Specific information on each API call is found in the - HDF5 Reference Manual. -

- Fortran90 APIs in the Reference Manual: - The current version of the HDF5 Reference Manual includes - descriptions of the Fortran90 APIs to HDF5. - Fortran subroutines exist in the H5, H5A, H5D, H5E, H5F, H5G, H5I, H5P, - H5R, H5S, H5T, and H5Z interfaces and are described on those pages. - In general, each Fortran subroutine performs exactly the same task - as the corresponding C function. - -

- Whereas Fortran subroutines had been described on separate pages in - prior releases, those descriptions were fully integrated into the - body of the reference manual for HDF5 Release 1.6.2 - (and mostly so for Release 1.6.1). -

- - HDF5 Fortran90 Flags and Datatypes - lists the flags employed in the Fortran90 interface and - contains a pointer to the HDF5 Fortran90 datatypes. -

- -C++ API - - - HDF5 C++ User's Notes -

- - HDF5 C++ Interfaces - -

- - -

- - -
-
- - - -
-
-
- -
- -
- - -
- -
- - - diff --git a/doc/html/RM_H5G.html b/doc/html/RM_H5G.html deleted file mode 100644 index 0bf9801..0000000 --- a/doc/html/RM_H5G.html +++ /dev/null @@ -1,1521 +0,0 @@ - - -HDF5/H5G API Specification - - - - - - - - - - - - - -
-
- - - -
-
-
-

H5G: Group Interface

-
- -

Group Object API Functions

- -The Group interface functions create and manipulate groups -of objects in an HDF5 file. -

-The C Interfaces: - - - - -
- -       - -       - -
- -Alphabetical Listing - - - - - - - - - - - - - -
- -        - -        - -
- -
-The FORTRAN90 Interfaces: -
-In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
- - - -
- -       - -       - -
- -

-A group associates names with objects and provides a mechanism -for mapping a name to an object. Since all objects appear in at -least one group (with the possible exception of the root object) -and since objects can have names in more than one group, the set -of all objects in an HDF5 file is a directed graph. The internal -nodes (nodes with out-degree greater than zero) must be groups -while the leaf nodes (nodes with out-degree zero) are either empty -groups or objects of some other type. Exactly one object in every -non-empty file is the root object. The root object always has a -positive in-degree because it is pointed to by the file super block. - -

-An object name consists of one or more components separated from -one another by slashes. An absolute name begins with a slash and the -object is located by looking for the first component in the root -object, then looking for the second component in the first object, etc., -until the entire name is traversed. A relative name does not begin -with a slash and the traversal begins at the location specified by the -create or access function. - -

- - - - - -


-
-
Name: H5Gclose -
Signature: -
herr_t H5Gclose(hid_t group_id) -
Purpose: -
Closes the specified group. -
Description: -
H5Gclose releases resources used by a group which was - opened by H5Gcreate or H5Gopen. - After closing a group, the group_id cannot be used again. -

- Failure to release a group with this call will result in resource leaks. -

Parameters: -
    - - - -
    hid_t group_id    IN: Group identifier to release.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5gclose_f -
-
-SUBROUTINE h5gclose_f( gr_id, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: gr_id     ! Group identifier
-  INTEGER, INTENT(OUT) :: hdferr          ! Error code 
-                                          ! 0 on success and -1 on failure
-END SUBROUTINE h5gclose_f
-	
- - -
- - - -
-
-
Name: H5Gcreate -
Signature: -
hid_t H5Gcreate(hid_t loc_id, - const char *name, - size_t size_hint - ) -
Purpose: -
Creates a new empty group and gives it a name. -
Description: -
H5Gcreate creates a new group with the specified - name at the specified location, loc_id. - The location is identified by a file or group identifier. - The name, name, must not already be taken by some - other object and all parent groups must already exist. -

- size_hint is a hint for the number of bytes to - reserve to store the names which will be eventually added to - the new group. Passing a value of zero for size_hint - is usually adequate since the library is able to dynamically - resize the name heap, but a correct hint may result in better - performance. - If a non-positive value is supplied for size_hint, - then a default size is chosen. -

- The return value is a group identifier for the open group. - This group identifier should be closed by calling - H5Gclose when it is no longer needed. -
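Example (non-normative): a minimal C sketch following the pre-1.8 signature documented here (loc_id, name, size_hint). The file name groups.h5 and group name /Data are hypothetical.

#include "hdf5.h"

int main(void)
{
    hid_t file_id, group_id;

    file_id  = H5Fcreate("groups.h5", H5F_ACC_TRUNC,
                         H5P_DEFAULT, H5P_DEFAULT);

    /* A size_hint of 0 lets the library choose a default heap size. */
    group_id = H5Gcreate(file_id, "/Data", 0);

    H5Gclose(group_id);
    H5Fclose(file_id);
    return 0;
}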

Parameters: -
    - - - - - - - - - -
    hid_t loc_idIN: File or group identifier.
    const char *nameIN: Absolute or relative name of the new group.
    size_t size_hint    IN: Optional parameter indicating the number of bytes - to reserve for the names that will appear in the group. - A conservative estimate could result in multiple - system-level I/O requests to read the group name heap; - a liberal estimate could result in a single large - I/O request even when the group has just a few names. - HDF5 stores each name with a null terminator.
-
Returns: -
Returns a valid group identifier for the open group if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5gcreate_f -
-
-SUBROUTINE h5gcreate_f(loc_id, name, gr_id, hdferr, size_hint)
-
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id   ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the group to be created 
-  INTEGER(HID_T), INTENT(OUT) :: gr_id   ! Group identifier
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-  INTEGER(SIZE_T), OPTIONAL, INTENT(IN) :: size_hint  
-                                         ! Number of bytes to store the names 
-                                         ! of objects in the group. 
-                                         ! Default value is 
-                                         ! OBJECT_NAMELEN_DEFAULT_F
-END SUBROUTINE h5gcreate_f 
-	
- - -
- - - -
-
-
Name: H5Gget_comment -
Signature: -
herr_t H5Gget_comment(hid_t loc_id, - const char *name, - size_t bufsize, - char *comment - ) -
Purpose: -
Retrieves comment for specified object. -
Description: -
H5Gget_comment retrieves the comment for the - object specified by loc_id and name. - The comment is returned in the buffer comment. -

- At most bufsize characters, including a null - terminator, are returned in comment. - The returned value is not null terminated - if the comment is longer than the supplied buffer. -

- If an object does not have a comment, the empty string - is returned. -
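Example (non-normative): a C sketch that prints an object's comment if one is present; the fixed 256-byte buffer is assumed to be large enough for this sketch.

#include "hdf5.h"
#include <stdio.h>

/* Print the comment attached to the object name (relative to loc_id). */
static void print_comment(hid_t loc_id, const char *name)
{
    char buf[256];   /* assumed large enough for this sketch */

    if (H5Gget_comment(loc_id, name, sizeof(buf), buf) >= 0 && buf[0] != '\0')
        printf("Comment on %s: %s\n", name, buf);
}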

Parameters: -
    - - - - - - - - - - - - -
    hid_t loc_idIN: Identifier of the file, group, dataset, or - named datatype.
    const char *name    IN: Name of the object in loc_id whose - comment is to be retrieved. -
    - name can be '.' (dot) if loc_id - fully specifies the object for which the associated comment - is to be retrieved. -
    - name is ignored if loc_id - is a dataset or named datatype. -
    size_t bufsizeIN: Anticipated required size of the - comment buffer.
    char *commentOUT: The comment.
-
Returns: -
Returns the number of characters in the comment, - counting the null terminator, if successful; the value - returned may be larger than bufsize. - Otherwise returns a negative value. -
Fortran90 Interface: h5gget_comment_f -
-
-SUBROUTINE h5gget_comment_f(loc_id, name, size, buffer, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id         ! File, group, dataset, or
-                                               ! named datatype identifier  
-  CHARACTER(LEN=*), INTENT(IN) :: name         ! Name of the object 
-  INTEGER(SIZE_T), INTENT(IN) :: size          ! Length of the comment buffer
-  CHARACTER(LEN=size), INTENT(OUT) :: buffer   ! Buffer to hold the comment
-  INTEGER, INTENT(OUT) :: hdferr               ! Error code 
-                                               ! 0 on success and -1 on failure
-END SUBROUTINE h5gget_comment_f
-    
- - -
- - - -
-
-
Name: H5Gget_linkval -
Signature: -
herr_t H5Gget_linkval(hid_t loc_id, - const char *name, - size_t size, - char *value - ) -
Purpose: -
Returns the name of the object that the symbolic link points to. -
Description: -
H5Gget_linkval returns size - characters of the name of the object that the symbolic link name points to. -

- The parameter loc_id is a file or group identifier. -

- The parameter name must be a symbolic link pointing to - the desired object and must be defined relative to loc_id. -

- If size is smaller than the size of the returned object name, then - the name stored in the buffer value will not be null terminated. -

- This function fails if name is not a symbolic link. - The presence of a symbolic link can be tested by passing zero for - size and NULL for value. -

- This function should be used only after H5Gget_objinfo has been called - to verify that name is a symbolic link. -
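Example: a C sketch of the recommended sequence, verifying that slink is a symbolic link before reading its value (loc_id and the link name are assumed; requires <stdlib.h>): -

      H5G_stat_t sb;
      H5Gget_objinfo(loc_id, "slink", 0, &sb);            /* follow_link = 0 */
      if (sb.type == H5G_LINK) {
          char *val = (char *)malloc(sb.linklen);         /* linklen counts the null terminator */
          H5Gget_linkval(loc_id, "slink", sb.linklen, val);
          /* ... use val ... */
          free(val);
      }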

Parameters: -
    - - - - - - - - - - - - -
    hid_t loc_idIN: Identifier of the file or group.
    const char *name    IN: Symbolic link to the object whose name is to be returned.
    size_t sizeIN: Maximum number of characters of value - to be returned.
    char *valueOUT: A buffer to hold the name of the object being sought.
-
Returns: -
Returns a non-negative value, with the link value in value, - if successful. - Otherwise returns a negative value. -
Fortran90 Interface: h5gget_linkval_f -
-
-SUBROUTINE h5gget_linkval_f(loc_id, name, size, buffer, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id         ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name         ! Name of the symbolic link 
-  INTEGER(SIZE_T), INTENT(IN) :: size          ! Length of the buffer
-  CHARACTER(LEN=size), INTENT(OUT) :: buffer   ! Buffer to hold the name of
-                                               ! the object that the
-                                               ! symbolic link points to
-  INTEGER, INTENT(OUT) :: hdferr               ! Error code 
-                                               ! 0 on success and -1 on failure
-END SUBROUTINE h5gget_linkval_f
-	
- - -
- - - -
-
-
Name: H5Gget_num_objs -
Signature: -
herr_t H5Gget_num_objs(hid_t loc_id, - hsize_t* num_obj) -
Purpose: -
Returns number of objects in the group specified by its identifier -
Description: -
H5Gget_num_objs returns the number of objects in a group. - The group is specified by its identifier, loc_id. - If a file identifier is passed in, then the number of objects in the - root group is returned. -
Parameters: -
    - - - - - - -
    hid_t loc_idIN: Identifier of the group or the file
    hsize_t *num_obj    OUT: Number of objects in the group.
-
Returns: -
Returns positive value if successful; - otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - -
-
-
Name: H5Gget_objinfo -
Signature: -
herr_t H5Gget_objinfo(hid_t loc_id, - const char *name, - hbool_t follow_link, - H5G_stat_t *statbuf - ) -
Purpose: -
Returns information about an object. -
Description: -
H5Gget_objinfo returns information about the - specified object through the statbuf argument. - loc_id (a file or group identifier) and - name together determine the object. - If the object is a symbolic link and follow_link is - zero (0), then the information returned is that for the link itself; - otherwise the link is followed and information is returned about - the object to which the link points. - If follow_link is non-zero but the final symbolic link - is dangling (does not point to anything), then an error is returned. - The statbuf fields are undefined for an error. - The existence of an object can be tested by calling this function - with a null statbuf. -

- H5Gget_objinfo fills in the following data structure - (defined in H5Gpublic.h): -

-                  typedef struct H5G_stat_t {
-                      unsigned long fileno;
-                      haddr_t objno;
-                      unsigned nlink;
-                      H5G_obj_t type;
-                      time_t mtime; 
-                      size_t linklen;
-                      H5O_stat_t ohdr;
-                  } H5G_stat_t;
-        
- - where H5O_stat_t (defined in H5Opublic.h) is: - -
-                  typedef struct H5O_stat_t {
-                      hsize_t size;
-                      hsize_t free;
-                      unsigned nmesgs;
-                      unsigned nchunks;
-                  } H5O_stat_t;
-        
- The fileno and objno fields together - uniquely identify an object among those - HDF5 files which are open: if both values are the same - for two objects, then the two objects are the same - (provided both files are still open). -
    -
  • Note that if a file is closed and re-opened, the - value in fileno will change. -
  • If a VFL driver either does not or cannot detect that - two H5Fopen calls referencing the same file - actually open the same file, each will get a different - fileno. -
-

- The nlink field is the number of hard links to - the object or zero when information is being returned about a - symbolic link (symbolic links do not have hard links but - all other objects always have at least one). -

- The type field contains the type of the object, - one of - H5G_GROUP, - H5G_DATASET, - H5G_LINK, or - H5G_TYPE. -

- The mtime field contains the modification time. -

- If information is being returned about a symbolic link then - linklen will be the length of the link value - (the name of the pointed-to object with the null terminator); - otherwise linklen will be zero. -

- The fields in the H5O_stat_t struct contain information - about the object header for the object queried: -

    -
    size -
    The total size of all the object header information in - the file (for all chunks). -
    free -
    The size of unused space in the object header. -
    nmesgs -
    The number of object header messages. -
    nchunks -
    The number of chunks the object header is broken up into. -
- -

- Other fields may be added to this structure in the future. -
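Example: a C sketch that reports on a link itself rather than its target (loc_id and the name mylink are assumed; requires <stdio.h>): -

      H5G_stat_t sb;
      if (H5Gget_objinfo(loc_id, "mylink", 0, &sb) >= 0) {    /* follow_link = 0 */
          if (sb.type == H5G_LINK)
              printf("symbolic link, value length %lu\n", (unsigned long)sb.linklen);
      }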

Note: -
Some systems will be able to record the time accurately but - unable to retrieve the correct time; such systems (e.g., Irix64) - will report an mtime value of 0 (zero). -
Parameters: -
    - - - - - - - - - - - - -
    hid_t loc_idIN: File or group identifier.
    const char *nameIN: Name of the object for which status is being sought.
    hbool_t follow_linkIN: Link flag.
    H5G_stat_t *statbuf    OUT: Buffer in which to return information about the object.
-
Returns: -
Returns a non-negative value if successful, with the fields of - statbuf (if non-null) initialized. - Otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - -
-
-
Name: H5Gget_objname_by_idx -
Signature: -
ssize_t H5Gget_objname_by_idx(hid_t loc_id, - hsize_t idx, - char *name, - size_t size ) -
Purpose: -
Returns a name of an object specified by an index. -
Description: -
H5Gget_objname_by_idx returns a name of the object - specified by the index idx in the group loc_id. -

- The group is specified by a group identifier loc_id. - If preferred, a file identifier may be passed in loc_id; - that file's root group will be assumed. -

- idx is the transient index used to iterate through - the objects in the group. - The value of idx is any nonnegative number less than - the total number of objects in the group, which is returned by the - function H5Gget_num_objs. - Note that this is a transient index; an object may have a - different index each time a group is opened. -

- The object name is returned in the user-specified buffer name. -

- If the size of the provided buffer name is - less than or equal to the actual object name length, - the object name is truncated to size - 1 characters. -

- Note that if the size of the object's name is unknown, a - preliminary call to H5Gget_objname_by_idx with name - set to NULL will return the length of the object's name. - A second call to H5Gget_objname_by_idx - can then be used to retrieve the actual name. -
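Example: a C sketch of the two-call pattern described above, combined with H5Gget_objtype_by_idx (grp_id and idx are assumed valid; requires <stdlib.h>): -

      ssize_t len  = H5Gget_objname_by_idx(grp_id, idx, NULL, 0);   /* query the name length */
      char   *name = (char *)malloc((size_t)len + 1);
      H5Gget_objname_by_idx(grp_id, idx, name, (size_t)len + 1);    /* retrieve the name */
      int     type = H5Gget_objtype_by_idx(grp_id, idx);
      /* ... use name and type ... */
      free(name);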

Parameters: -
    - - - - - - - - - - - - -
    hid_t loc_id    IN: Group or file identifier.
    hsize_t idxIN: Transient index identifying object.
    char *nameIN/OUT: Pointer to a user-provided buffer in which to return the object name.
    size_t sizeIN: Size of the name buffer.
-
Returns: -
Returns the size of the object name if successful, - or 0 if no name is associated with the group identifier. - Otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - -
-
-
Name: H5Gget_objtype_by_idx -
Signature: -
int H5Gget_objtype_by_idx(hid_t loc_id, - hsize_t idx ) -
Purpose: -
Returns the type of an object specified by an index. -
Description: -
H5Gget_objtype_by_idx returns the type of the object - specified by the index idx in the group loc_id. -

- The group is specified by a group identifier loc_id. - If preferred, a file identifier may be passed in loc_id; - that file's root group will be assumed. -

- idx is the transient index used to iterate through - the objects in the group. - This parameter is described in more detail in the discussion of - H5Gget_objname_by_idx. -

- The object type is returned as the function return value: -
      H5G_LINK       0    Object is a symbolic link. -
      H5G_GROUP      1    Object is a group. -
      H5G_DATASET    2    Object is a dataset. -
      H5G_TYPE       3    Object is a named datatype. -
- -

Parameters: -
    - - - - - - -
    hid_t loc_id    IN: Group or file identifier.
    hsize_t idxIN: Transient index identifying object.
-
Returns: -
Returns the type of the object if successful. - Otherwise returns a negative value. -
Fortran90 Interface: -
None. - - - - -
- - - -
-
-
Name: H5Giterate -
Signature: -
int H5Giterate(hid_t loc_id, - const char *name, - int *idx, - H5G_iterate_t operator, - void *operator_data - ) -
Purpose: -
Iterates an operation over the entries of a group. -
Description: -
H5Giterate iterates over the members of - name in the file or group specified with - loc_id. - For each object in the group, the operator_data - and some additional information, specified below, are - passed to the operator function. - The iteration begins with the idx object in the - group and the next element to be processed by the operator is - returned in idx. If idx - is NULL, then the iterator starts at the first group member; - since no stopping point is returned in this case, the iterator - cannot be restarted if one of the calls to its operator returns - non-zero. -

- The prototype for H5G_iterate_t is: - - - - -
    typedef herr_t (*H5G_iterate_t) - (hid_t group_id, const char * - member_name, void *operator_data);
- -

The operation receives the group identifier for the group being - iterated over, group_id, the name of the current - object within the group, member_name, and the - pointer to the operator data passed in to H5Giterate, - operator_data. -

- The return values from an operator are: -

    -
  • Zero causes the iterator to continue, returning - zero when all group members have been processed. -
  • Positive causes the iterator to immediately return that positive - value, indicating short-circuit success. The iterator can be - restarted at the next group member. -
  • Negative causes the iterator to immediately return that value, - indicating failure. The iterator can be restarted at the next - group member. -
-

- H5Giterate assumes that the membership of the group - identified by name remains unchanged through the - iteration. If the membership changes during the iteration, - the function's behavior is undefined. -
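Example: a C sketch of a simple operator and its use (the operator name print_member is hypothetical; file_id is assumed valid; requires <stdio.h>): -

      herr_t print_member(hid_t group_id, const char *member_name, void *operator_data)
      {
          printf("  %s\n", member_name);
          return 0;                          /* zero: continue the iteration */
      }

      /* in the calling code: */
      int idx = 0;
      H5Giterate(file_id, "/", &idx, print_member, NULL);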

Parameters: -
    - - - - - - - - - - - - - - - -
    hid_t loc_idIN: File or group identifier.
    const char *nameIN: Group over which the iteration is performed.
    int *idxIN/OUT: Location at which to begin the iteration.
    H5G_iterate_t operator    IN: Operation to be performed on an object at each step of - the iteration.
    void *operator_dataIN/OUT: Data associated with the operation.
-
Returns: -
Returns the return value of the last operator if it was non-zero, - or zero if all group members were processed. - Otherwise returns a negative value. - -
Fortran90 Interface: -
There is no direct FORTRAN counterpart for the C function - H5Giterate. - Instead, that functionality is provided by two FORTRAN functions: - -
- - - - - - - - -
- h5gn_members_f -    - Purpose: - Returns the number of group members. -
- h5gget_obj_info_idx_f -    - Purpose: - Returns name and type of the group member identified by its index. -
-
- -
-SUBROUTINE h5gn_members_f(loc_id, name, nmembers, hdferr)           
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id        ! File or group identifier 
-  CHARACTER(LEN=*), INTENT(IN) :: name        ! Name of the group 
-  INTEGER, INTENT(OUT) :: nmembers            ! Number of members in the group
-  INTEGER, INTENT(OUT) :: hdferr              ! Error code 
-                                              ! 0 on success and -1 on failure
-END SUBROUTINE h5gn_members_f
-		
- -
-SUBROUTINE h5gget_obj_info_idx_f(loc_id, name, idx, & 
-                                 obj_name, obj_type, hdferr)           
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id        ! File or group identifier 
-  CHARACTER(LEN=*), INTENT(IN) :: name        ! Name of the group 
-  INTEGER, INTENT(IN) :: idx                  ! Index of member object 
-  CHARACTER(LEN=*), INTENT(OUT) :: obj_name   ! Name of the object 
-  INTEGER, INTENT(OUT) :: obj_type            ! Object type : 
-                                              !     H5G_LINK_F 
-                                              !     H5G_GROUP_F 
-                                              !     H5G_DATASET_F 
-                                              !     H5G_TYPE_F 
-  INTEGER, INTENT(OUT) :: hdferr              ! Error code 
-                                              ! 0 on success and -1 on failure
-END SUBROUTINE h5gget_obj_info_idx_f
-		
-
- - - -
-
-
Name: H5Glink -
Signature: -
herr_t H5Glink(hid_t loc_id, - H5G_link_t link_type, - const char *current_name, - const char *new_name - ) -
Purpose: -
Creates a link of the specified type from new_name - to current_name. -
Description: -
H5Glink creates a new name for an object that has some current - name, possibly one of many names it currently has. -

- If link_type is H5G_LINK_HARD, then - current_name must specify the name of an - existing object and both - names are interpreted relative to loc_id, which is - either a file identifier or a group identifier. -

- If link_type is H5G_LINK_SOFT, then - current_name can be anything and is interpreted at - lookup time relative to the group which contains the final - component of new_name. For instance, if - current_name is ./foo, - new_name is ./x/y/bar, and a request - is made for ./x/y/bar, then the actual object looked - up is ./x/y/./foo. -
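Example: a C sketch creating one hard link and one soft link (file_id and all names are hypothetical): -

      /* Hard link: /data/dset1 must already exist. */
      H5Glink(file_id, H5G_LINK_HARD, "/data/dset1", "/data/dset1_alias");

      /* Soft link: the target need not exist when the link is created. */
      H5Glink(file_id, H5G_LINK_SOFT, "dset_to_be_created", "/data/soft_alias");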

Parameters: -
    - - - - - - - - - - - - -
    hid_t loc_idIN: File or group identifier.
    H5G_link_t link_typeIN: Link type. - Possible values are H5G_LINK_HARD and - H5G_LINK_SOFT.
    const char * current_name    IN: Name of the existing object if link is a hard link. - Can be anything for the soft link.
    const char * new_nameIN: New name for the object.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5glink_f -
-
-SUBROUTINE h5glink_f(loc_id, link_type, current_name, new_name, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id      ! File or group location identifier
-  INTEGER, INTENT(IN)        :: link_type   ! Link type, possible values are:
-                                            !     H5G_LINK_HARD_F
-                                            !     H5G_LINK_SOFT_F
-  CHARACTER(LEN=*), INTENT(IN) :: current_name
-                                            ! Current object name relative
-                                            ! to loc_id 
-  CHARACTER(LEN=*), INTENT(IN) :: new_name  ! New object name 
-  INTEGER, INTENT(OUT) :: hdferr            ! Error code
-
-END SUBROUTINE h5glink_f
-	
- - -
- - - -
-
-
Name: H5Glink2 -
Signature: - -
herr_t H5Glink2( - hid_t curr_loc_id, const char *current_name, - H5G_link_t link_type, - hid_t new_loc_id, const char *new_name ) -
Purpose: -
Creates a link of the specified type from new_name - to current_name. -
Description: - -
H5Glink2 creates a new name for an object that has some current - name, possibly one of many names it currently has. -

- If link_type is H5G_LINK_HARD, then current_name - must specify the name of an existing object. - In this case, current_name and new_name are interpreted - relative to curr_loc_id and new_loc_id, respectively, - which are either file or group identifiers. -

- If link_type is H5G_LINK_SOFT, then - current_name can be anything and is interpreted at - lookup time relative to the group which contains the final - component of new_name. For instance, if - current_name is ./foo, - new_name is ./x/y/bar, and a request - is made for ./x/y/bar, then the actual object looked - up is ./x/y/./foo. -

Parameters: -
    - - - - - - - - - - - - - - -
    hid_t curr_loc_idIN: The file or group identifier for the original object.
    const char * current_name    IN: Name of the existing object if link is a hard link. - Can be anything for the soft link.
    H5G_link_t link_typeIN: Link type. - Possible values are H5G_LINK_HARD and - H5G_LINK_SOFT.
    hid_t new_loc_idIN: The file or group identifier for the new link.
    const char * new_nameIN: New name for the object.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5glink2_f -
-
-SUBROUTINE h5glink2_f(cur_loc_id, cur_name, link_type, new_loc_id, new_name, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: cur_loc_id ! File or group location identifier
-  CHARACTER(LEN=*), INTENT(IN) :: cur_name ! Name of the existing object
-                                           ! is relative to cur_loc_id 
-                                           ! Can be anything for the soft link
-  INTEGER, INTENT(IN) :: link_type         ! Link type, possible values are:
-                                           !     H5G_LINK_HARD_F
-                                           !     H5G_LINK_SOFT_F
-  INTEGER(HID_T), INTENT(IN) :: new_loc_id ! New location identifier
-  CHARACTER(LEN=*), INTENT(IN) :: new_name ! New object name 
-  INTEGER, INTENT(OUT) :: hdferr           ! Error code
-
-END SUBROUTINE h5glink2_f
-	
- - -
- - - -
-
-
Name: H5Gmove -
Signature: -
herr_t H5Gmove(hid_t loc_id, - const char *src_name, - const char *dst_name - ) -
Purpose: -
Renames an object within an HDF5 file. -
Description: -
H5Gmove renames an object within an HDF5 file. - The original name, src_name, is unlinked from the - group graph and the new name, dst_name, is inserted - as an atomic operation. Both names are interpreted relative - to loc_id, which is either a file or a group - identifier. -
Warning: -
Exercise care in moving groups as it is possible to render data in - a file inaccessible with H5Gmove. - See The Group Interface - in the HDF5 User's Guide. -
Parameters: -
    - - - - - - - - - -
    hid_t loc_idIN: File or group identifier.
    const char *src_name    IN: Object's original name.
    const char *dst_nameIN: Object's new name.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5gmove_f -
-
-SUBROUTINE h5gmove_f(loc_id, name, new_name, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id     ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name     ! Original name of an object 
-  CHARACTER(LEN=*), INTENT(IN) :: new_name ! New name of an object 
-  INTEGER, INTENT(OUT) :: hdferr           ! Error code 
-                                           ! 0 on success and -1 on failure
-END SUBROUTINE h5gmove_f
-	
- - -
- - - -
-
-
Name: H5Gmove2 -
Signature: -
herr_t H5Gmove2( hid_t src_loc_id, - const char *src_name, hid_t dst_loc_id, - const char *dst_name ) -
Purpose: -
Renames an object within an HDF5 file. -
Description: -
H5Gmove2 renames an object within an HDF5 file. The original - name, src_name, is unlinked from the group graph and the new - name, dst_name, is inserted as an atomic operation. -

-

src_name and dst_name are interpreted relative to - src_loc_id and dst_loc_id, respectively, - which are either file or group identifiers. -
Warning: -
Exercise care in moving groups as it is possible to render data in a file - inaccessible with H5Gmove2. See The - Group Interface in the HDF5 User's Guide. -
Parameters: -
    - - - - - - - - - - - - -
    hid_t src_loc_idIN: Original file or group identifier.
    const char *src_name    IN: Object's original name.
    hid_t dst_loc_idIN: Destination file or group identifier.
    const char *dst_nameIN: Object's new name.
-
Returns: -
Returns a non-negative value if successful; otherwise returns a negative - value. -
Fortran90 Interface: h5gmove2_f -
-
-SUBROUTINE h5gmove2_f(src_loc_id, src_name, dst_loc_id, dst_name, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: src_loc_id   ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: src_name   ! Original name of an object 
-                                             ! relative to src_loc_id 
-  INTEGER(HID_T), INTENT(IN) :: dst_loc_id   ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: dst_name   ! New name of an object
-                                             ! relative to dst_loc_id 
-  INTEGER, INTENT(OUT) :: hdferr             ! Error code 
-                                             ! 0 on success and -1 on failure
-END SUBROUTINE h5gmove2_f
-	
- - -
- - - -
-
-
Name: H5Gopen -
Signature: -
hid_t H5Gopen(hid_t loc_id, - const char *name - ) -
Purpose: -
Opens an existing group for modification and returns a group - identifier for that group. -
Description: -
H5Gopen opens an existing group with the specified - name at the specified location, loc_id. -

- The location is identified by a file or group identifier -

- H5Gopen returns a group identifier for the group - that was opened. This group identifier should be released by - calling H5Gclose when it is no longer needed. -

Parameters: -
    - - - - - - -
    hid_t loc_idIN: File or group identifier within which the group is to be opened.
    const char * name    IN: Name of group to open.
-
Returns: -
Returns a valid group identifier if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5gopen_f -
-
-SUBROUTINE h5gopen_f(loc_id, name, gr_id, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id   ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the group to open 
-  INTEGER(HID_T), INTENT(OUT) :: gr_id   ! Group identifier
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5gopen_f
-	
- - -
- - - -
-
-
Name: H5Gset_comment -
Signature: -
herr_t H5Gset_comment(hid_t loc_id, - const char *name, - const char *comment - ) -
Purpose: -
Sets comment for specified object. -
Description: -
H5Gset_comment sets the comment for the - object specified by loc_id and name - to comment. - Any previously existing comment is overwritten. -

- If comment is the empty string or a - null pointer, the comment message is removed from the object. -

- Comments should be relatively short, null-terminated, - ASCII strings. -

- Comments can be attached to any object that has an object header, - e.g., datasets, groups, named datatypes, and dataspaces, but - not symbolic links. -

Parameters: -
    - - - - - - - - - -
    hid_t loc_idIN: Identifier of the file, group, dataset, - or named datatype.
    const char *nameIN: Name of the object whose comment is to be - set or reset. -
    - name can be '.' (dot) if loc_id - fully specifies the object for which the comment is to be set. -
    - name is ignored if loc_id - is a dataset or named datatype. -
    const char *comment    IN: The new comment.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5gset_comment_f -
-
-SUBROUTINE h5gset_comment_f(loc_id, name, comment, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id      ! File, group, dataset, or 
-                                            ! named datatype identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name      ! Name of object 
-  CHARACTER(LEN=*), INTENT(IN) :: comment   ! Comment for the object 
-  INTEGER, INTENT(OUT) :: hdferr            ! Error code 
-                                            ! 0 on success and -1 on failure
-END SUBROUTINE h5gset_comment_f
-	
- - -
- - - -
-
-
Name: H5Gunlink -
Signature: -
herr_t H5Gunlink(hid_t loc_id, - const char *name - ) -
Purpose: -
Removes the link to an object from a group. -
Description: -
H5Gunlink removes the object specified by - name from the group graph and decrements the - link count for the object to which name points. - This action eliminates any association between name - and the object to which name pointed. -

- Object headers keep track of how many hard links refer to an object; - when the link count reaches zero, the object can be removed - from the file. Objects which are open are not removed until all - identifiers to the object are closed. -

- If the link count reaches zero, all file space associated with - the object will be released, i.e., identified in memory as freespace. - If any object identifier is open for the object, the space - will not be released until after that identifier is closed. -

- Note that space identified as freespace is available for re-use - only as long as the file remains open; once a file has been - closed, the HDF5 library loses track of freespace. See - “Freespace Management” - in the HDF5 User's Guide for further details. -
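Example: a minimal C sketch (file_id and the name /data/old_dset are hypothetical): -

      if (H5Gunlink(file_id, "/data/old_dset") < 0) {
          /* handle the error */
      }
      /* The object's storage becomes free space only if this was its last
       * hard link and no identifier for the object remains open. */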

Warning: -
Exercise care in unlinking groups as it is possible to render data in - a file inaccessible with H5Gunlink. - See The Group Interface - in the HDF5 User's Guide. -
Parameters: -
    - - - - - - -
    hid_t loc_idIN: Identifier of the file or group containing the object.
    const char * name    IN: Name of the object to unlink.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
Fortran90 Interface: h5gunlink_f -
-
-SUBROUTINE h5gunlink_f(loc_id, name, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: loc_id   ! File or group identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the object to unlink 
-  INTEGER, INTENT(OUT) :: hdferr         ! Error code 
-                                         ! 0 on success and -1 on failure
-END SUBROUTINE h5gunlink_f
-	
- - -
- -
-
- - - -
diff --git a/doc/html/RM_H5I.html b/doc/html/RM_H5I.html
deleted file mode 100644
index 4f5e731..0000000
--- a/doc/html/RM_H5I.html
+++ /dev/null
@@ -1,1187 +0,0 @@
-HDF5/H5I API Specification

H5I: Identifier Interface

-
- -

Identifier API Functions

- -These functions provide tools for working with object identifiers and -object names. - - -

-The C Interface: alphabetical listing of the H5I functions documented below. -
- -
-The FORTRAN90 Interfaces: -
-In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -


-
-
Name: H5Iclear_type -
Signature: -
herr_t H5Iclear_type(H5I_type_t type, - hbool_t force) -
Purpose: -
Deletes all IDs of the given type -
Description: -
H5Iclear_type deletes all IDs of the type identified by the argument type. -

- The type’s free function is first called on all of these IDs to free their memory, - then they are removed from the type. - -

- If the force flag is set to false, only those IDs whose reference - counts are equal to 1 will be deleted, and all other IDs will be entirely unchanged. - If the force flag is true, all IDs of this type will be deleted. -

Parameters: -
    - - - - - - - - - -
    H5I_type_t type    IN: Identifier of ID type which is to be cleared of IDs
    hbool_t forceIN: Whether or not to force deletion of all IDs
    -
-
Returns: -
Returns non-negative on success, negative on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Idec_ref -
Signature: -
int H5Idec_ref(hid_t obj_id) -
Purpose: -
Decrements the reference count for an object. -
Description: -
H5Idec_ref decrements the reference count of the object - identified by obj_id. - -

- The reference count for an object ID is attached to the information - about an object in memory and has no relation to the number of links to - an object on disk. - -

- The reference count for a newly created object will be 1. - Reference counts for objects may be explicitly modified with this - function or with H5Iinc_ref. - When an object ID's reference count reaches zero, the object will be - closed. - Calling an object ID's 'close' function decrements the reference count - for the ID which normally closes the object, but - if the reference count for the ID has been incremented with - H5Iinc_ref, the object will only be closed when the - reference count - reaches zero with further calls to this function or the - object ID's 'close' function. - -

- If the object ID was created by a collective parallel call (such as - H5Dcreate, H5Gopen, etc.), the reference - count should be modified by all the processes which have copies of - the ID. Generally this means that group, dataset, attribute, file - and named datatype IDs should be modified by all the processes and - that all other types of IDs are safe to modify by individual processes. - -

- This function is of particular value when an application is maintaining - multiple copies of an object ID. The ID's reference count can be incremented when - a copy is made. Each copy of the ID can then be safely closed or - decremented, and the HDF5 object will be closed when the reference count - for that object drops to zero. -
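Example: a C sketch of maintaining a second copy of a dataset identifier (dset_id is assumed valid): -

      hid_t copy = dset_id;     /* plain copy of the identifier value */
      H5Iinc_ref(dset_id);      /* account for the extra copy */
      /* ... both handles may be used ... */
      H5Dclose(copy);           /* decrements the count; the dataset stays open */
      H5Dclose(dset_id);        /* count reaches zero; the dataset is closed */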

Parameters: -
    - - - -
    hid_t obj_id    IN: Object identifier whose reference count will be modified.
-
Returns: -
Returns a non-negative reference count of the object ID after - decrementing it if successful; otherwise a negative value is returned. -
Fortran90 Interface: h5idec_ref_f -
-
-SUBROUTINE h5idec_ref_f(obj_id, ref_count, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id  !Object identifier 
-  INTEGER, INTENT(OUT) :: ref_count     !Reference count of object ID
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code
-                                        ! 0 on success, and -1 on failure
-END SUBROUTINE h5idec_ref_f
-	
- - -
- - - -
-
-
Name: H5Idec_type_ref -
Signature: -
int H5Idec_type_ref(H5I_type_t type) -
Purpose: -
Decrements the reference count on an ID type. -
Description: -
H5Idec_type_ref decrements the reference count on an ID type. - The reference count is used by the library to indicate when an ID type can - be destroyed. If the reference count reaches zero, this function will destroy it. - -

The type parameter is the identifier for the ID type whose - reference count is to be decremented. This identifier must have been - created by a call to H5Iregister_type. -

Parameters: -
    - - - - - -
    H5I_type_t type    IN: The identifier of the type whose reference count is to be decremented
    -
-
Returns: -
Returns the current reference count on success, negative on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Idestroy_type -
Signature: -
herr_t H5Idestroy_type(H5I_type_t type) -
Purpose: -
Removes the type type and all IDs within that type. -
Description: -
H5Idestroy_type deletes an entire ID type. All IDs of this - type are destroyed and no new IDs of this type can be registered. - -

- The type’s free function is called on all of the IDs which are deleted by - this function, freeing their memory. In addition, all memory used by this - type’s hash table is freed. - -

- Since the H5I_type_t values of destroyed ID types are reused - when new types are registered, it is a good idea to set the variable - holding the value of the destroyed type to H5I_UNINIT. -

Parameters: -
    - - - - - -
    H5I_type_t type    IN: Identifier of ID type which is to be destroyed
    -
-
Returns: -
Returns non-negative on success, negative on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Iget_file_id -
Signature: -
hid_t H5Iget_file_id(hid_t obj_id) -
Purpose: -
Retrieves an identifier for the file containing the specified object. -
Description: -
H5Iget_file_id returns the identifier of the file - associated with the object referenced by obj_id. -

- obj_id can be a file, group, dataset, named datatype, - or attribute identifier. -

- Note that the HDF5 Library permits an application to close a file - while objects within the file remain open. - If the file containing the object obj_id - is still open, H5Iget_file_id will retrieve the - existing file identifier. - If there is no existing file identifier for the file, - i.e., the file has been closed, - H5Iget_file_id will reopen the file and - return a new file identifier. - In either case, the file identifier must eventually be released - using H5Fclose. -
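Example: a minimal C sketch (dset_id is assumed to be a valid dataset identifier): -

      hid_t file_id = H5Iget_file_id(dset_id);
      if (file_id >= 0) {
          /* ... use file_id ... */
          H5Fclose(file_id);    /* the returned identifier must eventually be released */
      }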

Parameters: -
    - - - - - -
    hid_t obj_id    IN: Identifier of the object whose associated - file identifier will be returned.
    -
-
Returns: -
Returns a file identifier on success, negative on failure. -
Fortran90 Interface: h5iget_file_id_f -
-
-SUBROUTINE h5iget_file_id_f(obj_id, file_id, hdferr) 
-
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN)  :: obj_id     ! Object identifier 
-  INTEGER(HID_T), INTENT(OUT) :: file_id    ! File identifier
-  INTEGER, INTENT(OUT) :: hdferr            ! Error code
-
-END SUBROUTINE h5iget_file_id_f
-    
- - -
- - - -
-
-
Name: H5Iget_name -
Signature: -
ssize_t H5Iget_name(hid_t obj_id, - char *name, - size_t size - ) -
Purpose:
-
Retrieves a name of an object based on the object identifier. -
Description: -
H5Iget_name retrieves a name for the object identified - by obj_id. -

- Up to size characters of the name are returned in - name; additional characters, if any, are not returned - to the user application. -

- If the length of the name, which determines the required - value of size, is unknown, a preliminary - H5Iget_name call can be made. - The return value of this call will be the size of the - object name. - That value can then be assigned to size - for a second H5Iget_name call, - which will retrieve the actual name. -

- If there is no name associated with the object identifier - or if the name is NULL, H5Iget_name - returns 0 (zero). -

- Note that an object in an HDF5 file may have multiple names, - varying according to the path through the HDF5 group - hierarchy used to reach that object. -
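Example: a C sketch of the two-call pattern described above, passing NULL for name on the first call to query the length (obj_id is assumed valid; requires <stdlib.h>): -

      ssize_t len  = H5Iget_name(obj_id, NULL, 0);         /* query the name length */
      char   *name = (char *)malloc((size_t)len + 1);
      H5Iget_name(obj_id, name, (size_t)len + 1);          /* retrieve the name */
      /* ... use name ... */
      free(name);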

Parameters: -
    - - - - - - - - - -
    hid_t obj_id    IN: Identifier of the object. - This identifier can refer to a group, dataset, or named datatype.
    char *nameOUT: A name associated with the identifier.
    size_t sizeIN: The size of the name buffer.
-
Returns: -
Returns the length of the name if successful, - returning 0 (zero) if no name is associated with the identifier. - Otherwise returns a negative value. -
Fortran90 Interface: h5iget_name_f -
-
-SUBROUTINE h5iget_name_f(obj_id, buf, buf_size, name_size, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN)    :: obj_id     ! Object identifier 
-  CHARACTER(LEN=*), INTENT(OUT) :: buf        ! Buffer to hold object name 
-  INTEGER(SIZE_T), INTENT(IN)   :: buf_size   ! Buffer size
-  INTEGER(SIZE_T), INTENT(OUT)  :: name_size  ! Name size
-  INTEGER, INTENT(OUT) :: hdferr              ! Error code
-                                              ! 0 on success, and -1 on failure
-END SUBROUTINE h5iget_name_f
-	
- - -
- - - -
-
-
Name: H5Iget_ref -
Signature: -
int H5Iget_ref(hid_t obj_id) -
Purpose: -
Retrieves the reference count for an object. -
Description: -
H5Iget_ref retrieves the reference count of the object - identified by obj_id. - -

- The reference count for an object ID is attached to the information - about an object in memory and has no relation to the number of links to - an object on disk. - -

- This function can also be used to check if an object ID is still valid. - A non-negative return value from this function indicates that the ID - is still valid. -

Parameters: -
    - - - -
    hid_t obj_id    IN: Object identifier whose reference count will be retrieved.
-
Returns: -
Returns a non-negative current reference count of the object ID - if successful; otherwise a negative value is returned. -
Fortran90 Interface: h5iget_ref_f -
-
-SUBROUTINE h5iget_ref_f(obj_id, ref_count, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id  !Object identifier 
-  INTEGER, INTENT(OUT) :: ref_count     !Reference count of object ID
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code
-                                        ! 0 on success, and -1 on failure
-END SUBROUTINE h5iget_ref_f
-	
- - -
- - - -
-
-
Name: H5Iget_type -
Signature: -
H5I_type_t H5Iget_type(hid_t obj_id) -
Purpose: -
Retrieves the type of an object. -
Description: -
H5Iget_type retrieves the type of the object - identified by obj_id. -

- Valid types returned by the function are - - - - - - - -
    H5I_FILE - File
    H5I_GROUP - Group
    H5I_DATATYPE - Datatype
    H5I_DATASPACE - Dataspace
    H5I_DATASET - Dataset
    H5I_ATTR - Attribute
- If no valid type can be determined or the identifier - submitted is invalid, the function returns - - -
    H5I_BADID - Invalid identifier
-

- This function is of particular value in determining the - type of object closing function (H5Dclose, - H5Gclose, etc.) to call after a call to - H5Rdereference. -
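Example: a C sketch of selecting the matching close call for an identifier of unknown type (obj_id is assumed valid; only a few of the possible types are shown): -

      switch (H5Iget_type(obj_id)) {
          case H5I_GROUP:    H5Gclose(obj_id); break;
          case H5I_DATASET:  H5Dclose(obj_id); break;
          case H5I_DATATYPE: H5Tclose(obj_id); break;
          default:           /* H5I_BADID or another type */ break;
      }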

Parameters: -
    - - - -
    hid_t obj_id    IN: Object identifier whose type is to be determined.
-
Returns: -
Returns the object type if successful; - otherwise H5I_BADID. -
Fortran90 Interface: h5iget_type_f -
-
-SUBROUTINE h5iget_type_f(obj_id, type, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id  !Object identifier 
-  INTEGER, INTENT(OUT) :: type          !type of an object. 
-                                        !possible values are:
-                                        !H5I_FILE_F
-                                        !H5I_GROUP_F
-                                        !H5I_DATATYPE_F
-                                        !H5I_DATASPACE_F
-                                        !H5I_DATASET_F
-                                        !H5I_ATTR_F
-                                        !H5I_BADID_F
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code
-                                        ! 0 on success, and -1 on failure
-END SUBROUTINE h5iget_type_f
-	
- - -
- - - -
-
-
Name: H5Iget_type_ref -
Signature: -
int H5Iget_type_ref(H5I_type_t type) -
Purpose: -
Retrieves the reference count on an ID type. -
Description: -
H5Iget_type_ref retrieves the reference count on an ID type. - The reference count is used by the library to indicate when an - ID type can be destroyed. - -

- The type parameter is the identifier for the ID type whose - reference count is to be retrieved. This identifier must have been created - by a call to H5Iregister_type. -

Parameters: -
    - - - - - -
    H5I_type_t type    IN: The identifier of the type whose reference count is to be retrieved
    -
-
Returns: -
Returns the current reference count on success, negative on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Iinc_ref -
Signature: -
int H5Iinc_ref(hid_t obj_id) -
Purpose: -
Increments the reference count for an object. -
Description: -
H5Iinc_ref increments the reference count of the object - identified by obj_id. - -

- The reference count for an object ID is attached to the information - about an object in memory and has no relation to the number of links to - an object on disk. - -

- The reference count for a newly created object will be 1. - Reference counts for objects may be explicitly modified with this - function or with H5Idec_ref. - When an object ID's reference count reaches zero, the object will be - closed. - Calling an object ID's 'close' function decrements the reference count - for the ID which normally closes the object, but - if the reference count for the ID has been incremented with this - function, the object will only be closed when the reference count - reaches zero with further calls to H5Idec_ref or the - object ID's 'close' function. - -

- If the object ID was created by a collective parallel call (such as - H5Dcreate, H5Gopen, etc.), the reference - count should be modified by all the processes which have copies of - the ID. Generally this means that group, dataset, attribute, file - and named datatype IDs should be modified by all the processes and - that all other types of IDs are safe to modify by individual processes. - -

- This function is of particular value when an application is maintaining - multiple copies of an object ID. The ID's reference count can be incremented when - a copy is made. Each copy of the ID can then be safely closed or - decremented, and the HDF5 object will be closed when the reference count - for that object drops to zero. -

Parameters: -
    - - - -
    hid_t obj_id    IN: Object identifier whose reference count will be modified.
-
Returns: -
Returns a non-negative reference count of the object ID after - incrementing it if successful; otherwise a negative value is returned. -
Fortran90 Interface: h5iinc_ref_f -
-
-SUBROUTINE h5iinc_ref_f(obj_id, ref_count, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: obj_id  !Object identifier 
-  INTEGER, INTENT(OUT) :: ref_count     !Reference count of object ID
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code
-                                        ! 0 on success, and -1 on failure
-END SUBROUTINE h5iinc_ref_f
-	
- - -
- - - -
-
-
Name: H5Iinc_type_ref -
Signature: -
int H5Iinc_type_ref(H5I_type_t type) -
Purpose: -
Increments the reference count on an ID type. -
Description: -
H5Iinc_type_ref increments the reference count on an ID type. - The reference count is used by the library to indicate when an ID type can be destroyed. - -

- The type parameter is the identifier for the ID type whose - reference count is to be incremented. This identifier must have been created - by a call to H5Iregister_type. -

Parameters: -
    - - - - - -
    H5I_type_t type    IN: The identifier of the type whose reference count is to be incremented
    -
-
Returns: -
Returns the current reference count on success, negative on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Inmembers -
Signature: -
int H5Inmembers(H5I_type_t type) -
Purpose: -
Returns the number of IDs in a given type. -
Description: -
H5Inmembers returns the number of IDs of a given ID type. - If no IDs of this type have been registered, H5Inmembers returns 0. - If the type does not exist or has been destroyed, H5Inmembers - also returns 0. -
Parameters: -
    - - - - - -
    H5I_type_t type    IN: Identifier of ID type whose member count will be retrieved
    -
-
Returns: -
Returns number of members on success, zero if type is not a valid ID type. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Iobject_verify -
Signature: -
void * H5Iobject_verify(hid_t id, - H5I_type_t id_type) -
Purpose: -
Returns the object referenced by id. -
Description: -
H5Iobject_verify returns a pointer to the memory referenced by - id after verifying that id is of type id_type. - This function is analogous to dereferencing a pointer in C with type checking. - -

- H5Iregister(H5I_type_t type, - void *object) takes an H5I_type_t and a - void pointer to an object, returning an hid_t of that type. - This hid_t can then be passed to H5Iobject_verify - along with its type to retrieve the object. - -

- H5Iobject_verify does not change the ID it is called on in any - way (as opposed to H5Iremove_verify, which removes the ID from its - type’s hash table). -

Parameters: -
    - - - - - - - - - -
    hid_t idIN: ID to be dereferenced
    H5I_type_t type    IN: ID type to which id should belong
    -
-
Returns: -
Pointer to the object referenced by id on success, NULL on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Iregister -
Signature: -
hid_t H5Iregister(H5I_type_t type, - void *object) -
Purpose: -
Creates and returns a new ID. -
Description: -
H5Iregister allocates space for a new ID and returns an identifier for it. - -

- The type parameter is the identifier for the ID type to which - this new ID will belong. This identifier must have been created by a call - to H5Iregister_type. - -

- The object parameter is a pointer to the memory which the new - ID will be a reference to. This pointer will be stored by the library and - returned to you via a call to H5Iobject_verify. -

Parameters: -
    - - - - - - - - - -
    H5I_type_t type    IN: The identifier of the type to which the new ID will belong
    void *objectIN: Pointer to memory for the library to store
    -
-
Returns: -
Returns the new ID on success, negative on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Iregister_type -
Signature: -
H5I_type_t H5Iregister_type(size_t - hash_size, unsigned reserved, - H5I_free_t free_func) -
Purpose: -
Creates and returns a new ID type. -
Description: -
H5Iregister_type allocates space for a new ID type and - returns an identifier for it. - -

- The hash_size parameter indicates the minimum size of the hash - table used to store IDs in the new type. - -

- The reserved parameter indicates the number of IDs in this new - type to be reserved. Reserved IDs are valid IDs which are not associated with - any storage within the library. - -

- The free_func parameter is a function pointer to a function - which returns an herr_t and accepts a void *. The purpose - of this function is to deallocate memory for a single ID. It will be called - by H5Iclear_type and H5Idestroy_type on each ID. - This function is NOT called by H5Iremove_verify. - The void * will be the same pointer which was passed in to - the H5Iregister function. The free_func - function should return 0 on success and -1 on failure. -
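Example: a C sketch of a user-defined ID type (the type my_obj_t, the free function free_my_obj, and the hash size 64 are hypothetical; requires <stdlib.h>): -

      static herr_t free_my_obj(void *obj) { free(obj); return 0; }

      H5I_type_t my_type = H5Iregister_type(64, 0, free_my_obj);
      my_obj_t  *obj     = (my_obj_t *)malloc(sizeof(my_obj_t));
      hid_t      id      = H5Iregister(my_type, obj);
      my_obj_t  *back    = (my_obj_t *)H5Iobject_verify(id, my_type);
      /* ... */
      H5Idestroy_type(my_type);    /* calls free_my_obj on each remaining ID */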

Parameters: -
    - - - - - - - - - - - - - -
    size_t hash_sizeIN: Size of the hash table (in entries) used to store IDs for the new type
    unsigned reservedIN: Number of reserved IDs for the new type
    H5I_free_t free_func    IN: Function used to deallocate space for a single ID
    -
-
Returns: -
Returns the type identifier on success, negative on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Iremove_verify -
Signature: -
void *H5Iremove_verify(hid_t id, - H5I_type_t id_type) -
Purpose: -
Removes an ID from internal storage. -
Description: -
H5Iremove_verify first ensures that id belongs to - id_type. If so, it removes id from internal storage - and returns the pointer to the memory it referred to. This pointer is the - same pointer that was placed in storage by H5Iregister. - If id does not belong to id_type, - then NULL is returned. - -

- The id parameter is the ID which is to be removed from - internal storage. Note: this function does NOT deallocate the memory that - id refers to. The pointer returned by H5Iregister - must be deallocated by the user to avoid memory leaks. - -

- The type parameter is the identifier for the ID type - which id is supposed to belong to. This identifier must - have been created by a call to H5Iregister_type. -

Parameters: -
    - - - - - - - - - -
    hid_t idIN: The ID to be removed from internal storage
    H5I_type_t type    IN: The identifier of the type whose reference count is to be retrieved
    -
-
Returns: -
Returns a pointer to the memory referred to by id - on success, NULL on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- - - -
-
-
Name: H5Isearch -
Signature: -
void *H5Isearch(H5I_type_t type, - H5I_search_func_t func, void *key) -
Purpose: -
Finds the memory referred to by an ID within the given ID type such that - some criterion is satisfied. -
Description: -
H5Isearch searches through a given ID type to find an object - that satisfies the criteria defined by func. If such an object - is found, the pointer to the memory containing this object is returned. - Otherwise, NULL is returned. To do this, func is - called on every member of type. The first member to satisfy - func is returned. -

- The type parameter is the identifier for the ID type which is - to be searched. This identifier must have been created by a call to - H5Iregister_type. - -

- The parameter func is a function pointer to a function - which takes three parameters. The first parameter is a void *. - It will be a pointer to the object to be tested. This is the same object - that was placed in storage using H5Iregister. The second - parameter is a hid_t. It is the ID of the object to be tested. - The last parameter is a void *. This is the key parameter - and can be used however the user finds helpful, or it can simply be ignored - if it is not needed. func returns 0 if the object it is testing - does not pass its criteria. A non-zero value should be returned if the object - does pass its criteria. -

- The key parameter will be passed to the search function as a - parameter. It can be used to further define the search at run-time. -
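Example: a C sketch of a search callback (the type my_obj_t, its field tag, and the ID type my_type are hypothetical and would have been set up with H5Iregister_type and H5Iregister): -

      static int match_tag(void *obj, hid_t id, void *key)
      {
          return ((my_obj_t *)obj)->tag == *(int *)key;    /* non-zero: criteria satisfied */
      }

      /* in the calling code: */
      int       tag   = 42;
      my_obj_t *found = (my_obj_t *)H5Isearch(my_type, match_tag, &tag);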

Parameters: -
    - - - - - - - - - - - - - -
    H5I_type_t typeIN: The identifier of the type to be searched
    H5I_search_func_t func    IN: The function defining the search criteria
    void *keyIN: A key for the search function
    -
-
Returns: -
Returns a pointer to the object which satisfies the search function - on success, NULL on failure. -
Fortran90 Interface: -
This function is not supported in FORTRAN 90. - - -
- -
-
- - - -
diff --git a/doc/html/RM_H5P.html b/doc/html/RM_H5P.html
deleted file mode 100644
index fa67723..0000000
--- a/doc/html/RM_H5P.html
+++ /dev/null
@@ -1,9783 +0,0 @@
-HDF5/H5P API Specification

H5P: Property List Interface

-
-

Property List API Functions

- -These functions manipulate property list objects so that objects -which require many different parameters can be easily manipulated. - - -

-The C Interfaces: - - - - - - - -
- - General Property List
Operations
- - -

Generic Properties -

  -
  -
  -
  -
  -
  -
-    ||   Indicates functions available only in the parallel HDF5 library.
- - -
       - -

File Creation Properties -

- -

File Access Properties - - - -
       - - Dataset Creation Properties - - -

Dataset Access, Memory, and
Transfer Properties
-

- -
- - - - - - -  - - File Access Properties -

    -
  • H5Pset_fclose_degree -
  • H5Pget_fclose_degree -
  • H5Pset_fapl_core -
  • H5Pget_fapl_core -
  • H5Pset_fapl_family -
  • H5Pget_fapl_family -
  • H5Pset_family_offset -
  • H5Pget_family_offset -
  • H5Pset_fapl_log -
  • H5Pset_fapl_mpio   || -
  • H5Pget_fapl_mpio   || -
  • H5Pset_fapl_mpiposix   || -
  • H5Pget_fapl_mpiposix   || -
  • H5Pset_fapl_multi -
  • H5Pget_fapl_multi -
  • H5Pset_multi_type -
  • H5Pget_multi_type -
  • H5Pset_fapl_split -
  • H5Pset_fapl_sec2 -
  • H5Pset_fapl_stdio -
  • H5Pset_fapl_stream -
  • H5Pget_fapl_stream - - - - - - - - - - - - - - -
    - -* Functions labeled with an asterisk (*) are provided only for -backwards compatibility with HDF5 Releases 1.4.x. -See further notes in the description of each function. -

    - - -Alphabetical Listing - - - - - - - - - - - - -
    - -        - -        - -

    - ||  Available only in the parallel HDF5 library. -
    - -
    - -The FORTRAN90 Interfaces: - -
    -In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
    - - - -
    - - General Property List Operations - - - -

    Generic Properties -

    - - - - - - - -

    File Creation Properties -

    - - - - - - - -
           - - - File Close Properties - - -

    Dataset Creation Properties -

    - - -
      -
      -
      -
    -||  Available only in the parallel HDF5 library. - - -
           - - - File Access Properties - - - -

    Dataset Access, Memory, and Transfer Properties -

    - -
    - - - -


    -
    -
    Name: H5Pall_filters_avail -
    Signature: -
    htri_t H5Pall_filters_avail(hid_t dcpl_id) -
    Purpose: -
    Verifies that all required filters are available. -
    Description: -
    H5Pall_filters_avail verifies that all of the filters - set in the dataset creation property list dcpl_id are - currently available. -
    Parameters: -
      - - - -
      hid_t dcpl_id    IN: Dataset creation property list identifier.
    -
    Returns: -
    Returns TRUE if all filters are available - and FALSE if one or more is not currently available.
    - Returns FAIL, a negative value, on error. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pclose -
    Signature: -
    herr_t H5Pclose(hid_t plist - ) -
    Purpose: -
    Terminates access to a property list. -
    Description: -
    H5Pclose terminates access to a property list. - All property lists should be closed when the application is - finished accessing them. - This frees resources used by the property list. -
    Parameters: -
      - - - -
      hid_t plist    IN: Identifier of the property list to terminate access to.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pclose_f -
    -
    -SUBROUTINE h5pclose_f(prp_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier 
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pclose_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pclose_class - -
    Signature: -
    herr_t H5Pclose_class( - hid_t class - ) - -
    Purpose: -
    Closes an existing property list class. - -
    Description: -
    Removes a property list class from the library. - -

    Existing property lists of this class will continue to exist, but new property lists of this class cannot be created.

    Parameters: -
      - - - -
      hid_t class    IN: Property list class to close
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value - -
    Fortran90 Interface: h5pclose_class_f -
    -
    -SUBROUTINE h5pclose_class_f(class, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: class ! Property list class identifier 
    -                                      ! to close
    -  INTEGER, INTENT(OUT) :: hdferr      ! Error code
    -                                      ! 0 on success and -1 on failure
    -END SUBROUTINE h5pclose_class_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pclose_list - -
    Signature: -
    herr_t H5Pclose_list( - hid_t plist - ) - -
    Purpose: -
    Closes a property list. - -
    Description: -
    H5Pclose_list closes a property list. - -

    - If a close callback exists for the property list class, - it is called before the property list is destroyed. - If close callbacks exist for any individual properties - in the property list, they are called after the class - close callback. - -

    Parameters: -
      - - -
      hid_t plist     - IN: Property list to close
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value - -
    Fortran90 Interface: h5pclose_list_f -
    -
    -SUBROUTINE h5pclose_list_f(plist, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist  ! Property list identifier to close
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pclose_list_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pcopy -
    Signature: -
    hid_t H5Pcopy(hid_t plist - ) -
    Purpose: -
    Copies an existing property list to create a new property list. -
    Description: -
    H5Pcopy copies an existing property list to create - a new property list. - The new property list has the same properties and values - as the original property list. -
    Parameters: -
      - - - -
      hid_t plist    IN: Identifier of property list to duplicate.
    -
    Returns: -
    Returns a property list identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pcopy_f -
    -
    -SUBROUTINE h5pcopy_f(prp_id, new_prp_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id       ! Property list identifier 
    -  INTEGER(HID_T), INTENT(OUT) :: new_prp_id  ! Identifier  of property list
    -                                             ! copy  
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -                                             ! 0 on success and -1 on failure
    -END SUBROUTINE h5pcopy_f
    -	
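
    Example (added; not in the original entry): a C sketch that derives a second creation list from an existing one so the original is left untouched. The names dcpl and dcpl2 and the chunk size are assumptions.

        #include "hdf5.h"

        static hid_t chunked_copy(hid_t dcpl)    /* dcpl: an existing dataset creation list */
        {
            hsize_t chunk[2] = {64, 64};         /* illustrative chunk dimensions */
            hid_t   dcpl2    = H5Pcopy(dcpl);    /* same properties and values as dcpl */

            H5Pset_chunk(dcpl2, 2, chunk);       /* modify only the copy */
            return dcpl2;                        /* caller must eventually H5Pclose() it */
        }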
    - - -
    - - - -
    -
    -
    Name: H5Pcopy_prop - -
    Signature: -
    herr_t H5Pcopy_prop( - hid_t dst_id, - hid_t src_id, - const char *name - ) - -
    Purpose: -
    Copies a property from one list or class to another. - -
    Description: -
    H5Pcopy_prop copies a property from one property - list or class to another. - -

    - If a property is copied from one class to another, all the property - information will be first deleted from the destination class and - then the property information will be copied from the source class - into the destination class. - -

    - If a property is copied from one list to another, the property - will be first deleted from the destination list (generating a call - to the close callback for the property, if one exists) - and then the property is copied from the source list to the - destination list (generating a call to the copy - callback for the property, if one exists). - -

    - If the property does not exist in the class or list, this call is - equivalent to calling H5Pregister or H5Pinsert - (for a class or list, as appropriate) and the create - callback will be called in the case of the property being - copied into a list (if such a callback exists for the property). - -

    Parameters: -
      - - - - - - - - - -
      hid_t dst_idIN: Identifier of the destination property list or - class
      hid_t src_idIN: Identifier of the source property list or class
      const char *name    IN: Name of the property to copy
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value - -
    Fortran90 Interface: h5pcopy_prop_f -
    -
    -SUBROUTINE h5pcopy_prop_f(dst_id, src_id, name, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: dst_id  ! Destination property list 
    -                                        ! identifier 
    -  INTEGER(HID_T), INTENT(IN) :: src_id  ! Source property list identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Property name
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pcopy_prop_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pcreate -
    Signature: -
    hid_t H5Pcreate(hid_t cls_id - ) -
    Purpose: -
    Creates a new property list as an instance of a property list class.
    Description:
    H5Pcreate creates a new property list as an instance of some property list class. The new property list is initialized with default values for the specified class. The classes are:
    -
    H5P_FILE_CREATE -
    Properties for file creation. - See Files - in the HDF User's Guide - for details about the file creation properties. -
    H5P_FILE_ACCESS -
    Properties for file access. See Files in the HDF User's Guide for details about the file access properties.
    H5P_DATASET_CREATE -
    Properties for dataset creation. - See Datasets - in the HDF User's Guide - for details about dataset creation properties. -
    H5P_DATASET_XFER -
    Properties for raw data transfer. - See Datasets - in the HDF User's Guide - for details about raw data transfer properties. -
    H5P_MOUNT -
    Properties for file mounting. - With this parameter, H5Pcreate - creates and returns a new mount property list - initialized with default values. -
    -

    - This property list must eventually be closed with - H5Pclose; - otherwise, errors are likely to occur. -

    Parameters: -
      - - - -
      hid_t cls_id    IN: The class of the property list to create.
    -
    Returns: -
    Returns a property list identifier (plist) if successful; - otherwise Fail (-1). -
    Fortran90 Interface: h5pcreate_f -
    -
    -SUBROUTINE h5pcreate_f(classtype, prp_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER, INTENT(IN) :: classtype       ! The type of the property list 
    -                                         ! to be created 
    -                                         ! Possible values are: 
    -                                         !    H5P_FILE_CREATE_F 
    -                                         !    H5P_FILE_ACCESS_F
    -                                         !    H5P_DATASET_CREATE_F
    -                                         !    H5P_DATASET_XFER_F 
    -                                         !    H5P_MOUNT_F 
    -  INTEGER(HID_T), INTENT(OUT) :: prp_id  ! Property list identifier 
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pcreate_f
    -	
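
    Example (added; not in the original entry): a C sketch creating file creation and file access property lists and using them to create a file. The file name is illustrative and error checking is omitted.

        #include "hdf5.h"

        static hid_t create_file(const char *name)
        {
            hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);   /* file creation properties */
            hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);   /* file access properties   */
            hid_t file = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, fapl);

            H5Pclose(fcpl);
            H5Pclose(fapl);
            return file;
        }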
    - - -
    - - - -
    -
    -
    Name: H5Pcreate_class - -
    Signature: -
    hid_t H5Pcreate_class( - hid_t class, - const char *name, - H5P_cls_create_func_t create, - H5P_cls_copy_func_t copy, - H5P_cls_close_func_t close - ) - -
    Purpose: -
    Creates a new property list class. - -
    Description: -
    H5Pcreate_class registers a new property list class - with the library. - The new property list class can inherit from an existing property - list class or may be derived from the default "empty" class. - New classes with inherited properties from existing classes - may not remove those existing properties, only add or remove - their own class properties. -

    - - The create routine is called when a new property list - of this class is being created. - The H5P_cls_create_func_t callback function is defined - as follows: -
      typedef herr_t (*H5P_cls_create_func_t)( - hid_t prop_id, - void * create_data - ); -
    - The parameters to this callback function are defined as follows: -
      - - - - - - -
      hid_t prop_idIN: The identifier of the property list being created
      void * create_dataIN/OUT: User pointer to any class creation information needed
    - The create routine is called after any registered - create function is called for each property value. - If the create routine returns a negative value, - the new list is not returned to the user and the - property list creation routine returns an error value. -

    - - The copy routine is called when an existing property list - of this class is copied. - The H5P_cls_copy_func_t callback function - is defined as follows: -
      typedef herr_t (*H5P_cls_copy_func_t)( - hid_t prop_id, - void * copy_data - ); -
    - The parameters to this callback function are defined as follows: -
      - - - - - - -
      hid_t prop_idIN: The identifier of the property list created by copying
      void * copy_dataIN/OUT: User pointer to any class copy information needed
    - The copy routine is called after any registered - copy function is called for each property value. - If the copy routine returns a negative value, the new list - is not returned to the user and the property list copy routine returns - an error value. -

    - - The close routine is called when a property list of this - class - is being closed. - The H5P_cls_close_func_t callback function is defined - as follows: -
      typedef herr_t (*H5P_cls_close_func_t)( - hid_t prop_id, - void * close_data - ); -
    - The parameters to this callback function are defined as follows: -
      - - - - - - -
      hid_t prop_idIN: The identifier of the property list being closed
      void * close_dataIN/OUT: User pointer to any class close information needed
    - The close routine is called before any registered - close function is called for each property value. - If the close routine returns a negative value, - the property list close routine returns an error value - but the property list is still closed. - -
    Parameters: -
      - - - - - - - - - - - - - - - -
      hid_t classIN: Property list class to inherit from.
      const char *nameIN: Name of property list class to register
      H5P_cls_create_func_t create    IN: Callback routine called when a property list is created
      H5P_cls_copy_func_t copyIN: Callback routine called when a property list is copied
      H5P_cls_close_func_t closeIN: Callback routine called when a property list is being closed
      -
    - -
    Returns: -
    Success: a valid property list class identifier -
    Failure: a negative value - - -
    Fortran90 Interface: h5pcreate_class_f -
    -
    -SUBROUTINE h5pcreate_class_f(parent, name, class, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: parent  ! Parent property list class 
    -                                        ! identifier
    -                                        ! Possible values include:
    -                                        !    H5P_NO_CLASS_F
    -                                        !    H5P_FILE_CREATE_F
    -                                        !    H5P_FILE_ACCESS_F
    -                                        !    H5P_DATASET_CREATE_F
    -                                        !    H5P_DATASET_XFER_F
    -                                        !    H5P_MOUNT_F
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of property to create 
    -  INTEGER(HID_T), INTENT(OUT) :: class  ! Property list class identifier
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pcreate_class_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pcreate_list - -
    Signature: -
    hid_t H5Pcreate_list( - hid_t class) - -
    Purpose: -
    Creates a new property list of a given class.
    Description: -
    H5Pcreate_list creates a new property list of a - given class. If a create callback exists for the - property list class, it is called before the property list - is passed back to the user. - If create callbacks exist for any individual properties - in the property list, they are called before the class - create callback. - -
    Parameter: -
      - - - -
      hid_t class;    IN: Class of property list to create.
    - -
    Returns: -
    Success: a valid property list identifier -
    Failure: a negative value - -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Premove_filter -
    Signature: -
    herr_t H5Premove_filter(hid_t plist, - H5Z_filter_t filter - ) -
    Purpose: -
    Delete one or more filters in the filter pipeline. -
    Description: -
    H5Premove_filter removes the specified - filter from the filter pipeline in the - dataset creation property list plist. -

    - The filter parameter specifies the filter to be removed. - Valid values for use in filter are as follows: - -

    - - - - - - -
    - H5Z_FILTER_ALL - - Removes all filters from the permanent filter pipeline. -
    - H5Z_FILTER_DEFLATE - - Data compression filter, employing the gzip algorithm -
    - H5Z_FILTER_SHUFFLE - - Data shuffling filter -
    - H5Z_FILTER_FLETCHER32   - - Error detection filter, employing the Fletcher32 checksum algorithm -
    - H5Z_FILTER_SZIP - - Data compression filter, employing the SZIP algorithm -
    -
    -

    - Additionally, user-defined filters can be removed with this routine - by passing the filter identifier with which they were registered - with the HDF5 Library. -

    - Attempting to remove a filter that is not in the permanent filter - pipeline is an error. -

    Note: -
    This function currently supports only the permanent filter - pipeline; plist must be a dataset creation - property list. -
    Parameters: -
    -
    hid_t plist_id -
    IN: Dataset creation property list identifier. -
    H5Z_filter_t filter -
    IN: Filter to be deleted. -
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. - -
    Fortran90 Interface: h5premove_filter_f -
    -
    -SUBROUTINE h5premove_filter_f(prp_id, filter, hdferr) 
    -
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Dataset creation property 
    -                                       ! list identifier
    -  INTEGER, INTENT(IN) :: filter        ! Filter to be removed
    -                                       ! Valid values are:
    -                                       !     H5Z_FILTER_ALL_F
    -                                       !     H5Z_FILTER_DEFLATE_F
    -                                       !     H5Z_FILTER_SHUFFLE_F
    -                                       !     H5Z_FILTER_FLETCHER32_F
    -                                       !     H5Z_FILTER_SZIP_F
    -                                       !
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success, -1 on failure
    -END SUBROUTINE h5premove_filter_f
    -	
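
    Example (added; not in the original entry): a C sketch removing one filter from a dataset creation property list. The list name dcpl is an assumption.

        #include "hdf5.h"

        static void drop_shuffle(hid_t dcpl)
        {
            /* Removing a filter that is not in the pipeline is reported as an error. */
            if (H5Premove_filter(dcpl, H5Z_FILTER_SHUFFLE) < 0) {
                /* shuffle was not present; handle as appropriate */
            }
            /* H5Premove_filter(dcpl, H5Z_FILTER_ALL) would clear the whole pipeline. */
        }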
    - - -
    - - - -
    -
    -
    Name: H5Pequal - -
    Signature: -
    htri_t H5Pequal( - hid_t id1, - hid_t id2 - ) - -
    Purpose: -
    Compares two property lists or classes for equality. - -
    Description: -
    H5Pequal compares two property lists or classes - to determine whether they are equal to one another. - -

    - Either both id1 and id2 must be - property lists or both must be classes; comparing a list to a - class is an error. - -

    Parameters: -
      - - - - - - -
      hid_t id1    IN: First property object to be compared
      hid_t id2IN: Second property object to be compared
    - -
    Returns: -
    Success: TRUE (positive) if equal; FALSE (zero) if unequal -
    Failure: a negative value - -
    Fortran90 Interface: h5pequal_f -
    -
    -SUBROUTINE h5pequal_f(plist1_id, plist2_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist1_id ! Property list identifier
    -  INTEGER(HID_T), INTENT(IN) :: plist2_id ! Property list identifier
    -  LOGICAL, INTENT(OUT)       :: flag      ! Flag
    -                                          !    .TRUE. if lists are equal 
    -                                          !    .FALSE. otherwise 
    -  INTEGER, INTENT(OUT)       :: hdferr    ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5pequal_f
    -	
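
    Example (added; not in the original entry): a C sketch comparing two property lists; the parameter names are illustrative.

        #include "hdf5.h"

        static int same_settings(hid_t plist1, hid_t plist2)
        {
            htri_t eq = H5Pequal(plist1, plist2);  /* both must be lists, or both classes */

            return eq > 0;                         /* negative indicates an error */
        }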
    - - -
    - - - -
    -
    -
    Name: H5Pexist - -
    Signature: -
    htri_t H5Pexist( - hid_t id; - const char *name - ) - -
    Purpose: -
    Queries whether a property name exists in a property list - or class. - -
    Description: -
    H5Pexist determines whether a property exists - within a property list or class. - -
    Parameters: -
      - - - - - - -
      hid_t idIN: Identifier for the property to query
      const char *name    IN: Name of property to check for
    - -
    Returns: -
    Success: a positive value if the property exists in the - property object; zero if the property does not exist -
    Failure: a negative value - -
    Fortran90 Interface: h5pexist_f -
    -
    -SUBROUTINE h5pexist_f(prp_id, name, flag, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of property to modify
    -  LOGICAL, INTENT(OUT) :: flag          ! Logical flag
    -                                        !    .TRUE. if exists 
    -                                        !    .FALSE. otherwise
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pexist_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pfill_value_defined -
    Signature: -
    herr_t H5Pfill_value_defined(hid_t plist_id, - H5D_fill_value_t *status - ) -
    Purpose: -
    Determines whether fill value is defined. -
    Description: -
    H5Pfill_value_defined determines whether a fill value - is defined in the dataset creation property list plist_id. -

    - Valid values returned in status are as follows: - -
         - H5D_FILL_VALUE_UNDEFINED - - Fill value is undefined. -
    - H5D_FILL_VALUE_DEFAULT - - Fill value is the library default. -
    - H5D_FILL_VALUE_USER_DEFINED   - - Fill value is defined by the application. -
    -

    Note: -
    H5Pfill_value_defined is designed for use in - concert with the dataset fill value properties functions - H5Pget_fill_value and H5Pget_fill_time. -

    - See H5Dcreate for - further cross-references. -

    Parameters: -
      - - - - - - -
      hid_t plist_idIN: Dataset creation property list identifier.
      H5D_fill_value_t *status    OUT: Status of fill value in property list.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
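
    Example (added; not in the original entry): a C sketch branching on the fill value status of a dataset creation list; dcpl is an assumption.

        #include "hdf5.h"

        static void check_fill(hid_t dcpl)
        {
            H5D_fill_value_t status;

            H5Pfill_value_defined(dcpl, &status);
            if (status == H5D_FILL_VALUE_USER_DEFINED) {
                /* the application supplied a fill value via H5Pset_fill_value */
            } else if (status == H5D_FILL_VALUE_DEFAULT) {
                /* the library default fill value will be used */
            } else {
                /* H5D_FILL_VALUE_UNDEFINED: no fill value will be written */
            }
        }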
    - - - -
    -
    -
    Name: H5Pget - -
    Signature: -
    herr_t H5Pget( - hid_t plid, - const char *name, - void *value - ) - -
    Purpose: -
    Queries the value of a property. - -
    Description: -
    H5Pget retrieves a copy of the value for a property - in a property list. If there is a get callback routine - registered for this property, the copy of the value of the property - will first be passed to that routine and any changes to the copy of - the value will be used when returning the property value from this - routine. - -

    - This routine may be called for zero-sized properties with the - value set to NULL. The get routine - will be called with a NULL value if the callback exists. - -

    - The property name must exist or this routine will fail. - -

    - If the get callback routine returns an error, - value will not be modified. - -

    Parameters: -
      - - - - - - - - - -
      hid_t plidIN: Identifier of the property list to query
      const char *name    IN: Name of property to query
      void *value    OUT: Pointer to a location to which to copy the value of the property
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value - -
    Fortran90 Interface: h5pget_f -
    -
    -SUBROUTINE h5pget_f(plid, name, value, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plid    ! Property list identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of property to get
    -  TYPE,  INTENT(OUT) :: value           ! Property value
    -                                        ! Supported types are:
    -                                        !    INTEGER
    -                                        !    REAL
    -                                        !    DOUBLE PRECISION
    -                                        !    CHARACTER(LEN=*)
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_alignment -
    Signature: -
    herr_t H5Pget_alignment(hid_t plist, - hsize_t *threshold, - hsize_t *alignment - ) -
    Purpose: -
    Retrieves the current settings for alignment properties from a - file access property list. -
    Description: -
    H5Pget_alignment retrieves the current settings for - alignment properties from a file access property list. - The threshold and/or alignment pointers - may be null pointers (NULL). -
    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Identifier of a file access property list.
      hsize_t *threshold    OUT: Pointer to location of return threshold value.
      hsize_t *alignmentOUT: Pointer to location of return alignment value.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_alignment_f -
    -
    -SUBROUTINE h5pget_alignment_f(prp_id, threshold,  alignment, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id        ! Property list identifier
    -  INTEGER(HSIZE_T), INTENT(OUT) :: threshold  ! Threshold value
    -  INTEGER(HSIZE_T), INTENT(OUT) :: alignment  ! Alignment value
    -  INTEGER, INTENT(OUT) :: hdferr              ! Error code
    -                                              ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_alignment_f
    -	
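
    Example (added; not in the original entry): a C sketch that sets alignment properties and reads them back; the fapl name and the values are assumptions.

        #include "hdf5.h"

        static void show_alignment(hid_t fapl)
        {
            hsize_t threshold = 0, alignment = 0;

            H5Pset_alignment(fapl, 1024, 4096);            /* illustrative values */
            H5Pget_alignment(fapl, &threshold, &alignment);
            /* threshold is now 1024 and alignment 4096 */
        }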
    - - -
    - - - -
    -
    -
    Name: H5Pget_alloc_time -
    Signature: -
    herr_t H5Pget_alloc_time(hid_t plist_id, - H5D_alloc_time_t *alloc_time - ) -
    Purpose: -
    Retrieves the timing for storage space allocation. -
    Description: -
    H5Pget_alloc_time retrieves the timing for allocating - storage space for a dataset's raw data. - This property is set in the dataset creation property list - plist_id. -

    The timing setting is returned in alloc_time as one of the following values:
         - H5D_ALLOC_TIME_DEFAULT   - - Uses the default allocation time, based on the dataset storage method.
    - See the fill_time description in - H5Pset_alloc_time for - default allocation times for various storage methods. -
    - H5D_ALLOC_TIME_EARLY - - All space is allocated when the dataset is created. -
    - H5D_ALLOC_TIME_INCR   - - Space is allocated incrementally as data is written to the dataset. -
    - H5D_ALLOC_TIME_LATE - - All space is allocated when data is first written to the dataset. -
    -

    Note: -
    H5Pget_alloc_time is designed to work in concert - with the dataset fill value and fill value write time properties, - set with the functions - H5Pget_fill_value and H5Pget_fill_time. -
    Parameters: -
      - - - - - - -
      hid_t plist_idIN: Dataset creation property list identifier.
      H5D_alloc_time_t *alloc_time    OUT: When the dataset storage space will be allocated.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_alloc_time_f -
    -
    -SUBROUTINE h5pget_alloc_time_f(plist_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id   ! Dataset creation
    -                                           ! property list identifier
    -  INTEGER(HSIZE_T), INTENT(OUT) :: flag    ! Allocation time flag
    -                                           ! Possible values are:
    -                                           !    H5D_ALLOC_TIME_ERROR_F
    -                                           !    H5D_ALLOC_TIME_DEFAULT_F
    -                                           !    H5D_ALLOC_TIME_EARLY_F
    -                                           !    H5D_ALLOC_TIME_LATE_F
    -                                           !    H5D_ALLOC_TIME_INCR_F
    -  INTEGER, INTENT(OUT)       :: hdferr     ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_alloc_time_f
    -	
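
    Example (added; not in the original entry): a C sketch querying the allocation time recorded in a dataset creation list; dcpl is an assumption.

        #include "hdf5.h"

        static void report_alloc_time(hid_t dcpl)
        {
            H5D_alloc_time_t when;

            H5Pget_alloc_time(dcpl, &when);
            if (when == H5D_ALLOC_TIME_EARLY) {
                /* all space is allocated when the dataset is created */
            } else if (when == H5D_ALLOC_TIME_LATE || when == H5D_ALLOC_TIME_INCR) {
                /* space is allocated when, or as, data is written */
            }
        }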
    - - -
    - - - -
    -
    -
    Name: H5Pget_btree_ratios -
    Signature: -
    herr_t H5Pget_btree_ratios(hid_t plist, - double *left, - double *middle, - double *right - ) -
    Purpose: -
    Gets B-tree split ratios for a dataset transfer property list. -
    Description: -
    H5Pget_btree_ratios returns the B-tree split ratios - for a dataset transfer property list. -

    - The B-tree split ratios are returned through the non-NULL - arguments left, middle, and right, - as set by the H5Pset_btree_ratios function. -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t plistIN: The dataset transfer property list identifier.
      double leftOUT: The B-tree split ratio for left-most nodes.
      double rightOUT: The B-tree split ratio for right-most nodes and lone nodes.
      double middle    OUT: The B-tree split ratio for all other nodes.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_btree_ratios_f -
    -
    -SUBROUTINE h5pget_btree_ratios_f(prp_id, left, middle, right, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  
    -                                  ! Property list identifier
    -  REAL, INTENT(OUT) :: left       ! B-tree split ratio for left-most nodes
    -  REAL, INTENT(OUT) :: middle     ! B-tree split ratio for all other nodes
    -  REAL, INTENT(OUT) :: right      ! The B-tree split ratio for right-most
    -                                  ! nodes and lone nodes.
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -                                  ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_btree_ratios_f
    -	
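
    Example (added; not in the original entry): a C sketch reading the B-tree split ratios from a dataset transfer list; dxpl is an assumption.

        #include "hdf5.h"

        static void show_ratios(hid_t dxpl)
        {
            double left, middle, right;

            H5Pget_btree_ratios(dxpl, &left, &middle, &right);
            /* the values reflect any prior call to H5Pset_btree_ratios on dxpl */
        }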
    - - -
    - - - -
    -
    -
    Name: H5Pget_buffer -
    Signature: -
    hsize_t H5Pget_buffer(hid_t plist, - void **tconv, - void **bkg - ) -
    Purpose: -
    Reads buffer settings. -
    Description: -
    H5Pget_buffer reads values previously set - with H5Pset_buffer. -
    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Identifier for the dataset transfer property list.
      void **tconv    OUT: Address of the pointer to application-allocated - type conversion buffer.
      void **bkgOUT: Address of the pointer to application-allocated - background buffer.
    -
    Returns: -
    Returns buffer size, in bytes, if successful; - otherwise 0 on failure. -
    Fortran90 Interface: h5pget_buffer_f -
    -
    -SUBROUTINE h5pget_buffer_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)    :: plist_id ! Dataset transfer 
    -                                            ! property list identifier
    -  INTEGER(HSIZE_T), INTENT(OUT) :: size     ! Conversion buffer size
    -  INTEGER, INTENT(OUT)          :: hdferr   ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_buffer_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_cache -
    Signature: -
    herr_t H5Pget_cache(hid_t plist_id, - int *mdc_nelmts, - int *rdcc_nelmts, - size_t *rdcc_nbytes, - double *rdcc_w0 - ) -
    Purpose: -
    Queries the meta data cache and raw data chunk cache parameters. -
    Description: -
    H5Pget_cache retrieves the maximum possible - number of elements in the meta - data cache and raw data chunk cache, the maximum possible number of - bytes in the raw data chunk cache, and the preemption policy value. -

    - Any (or all) arguments may be null pointers, in which case the - corresponding datum is not returned. -

    Parameters: -
      - - - - - - - - - - - - - - - -
      hid_t plist_idIN: Identifier of the file access property list.
      int *mdc_nelmtsIN/OUT: Number of elements (objects) in the meta data cache.
      int *rdcc_nelmtsIN/OUT: Number of elements (objects) in the raw data chunk cache.
      size_t *rdcc_nbytes    IN/OUT: Total size of the raw data chunk cache, in bytes.
      double *rdcc_w0IN/OUT: Preemption policy.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_cache_f -
    -
    -SUBROUTINE h5pget_cache_f(prp_id, mdc_nelmts, rdcc_nelmts, rdcc_nbytes,
    -                          rdcc_w0, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id         ! Property list identifier
    -  INTEGER, INTENT(OUT) :: mdc_nelmts           ! Number of elements (objects)
    -                                               ! in the meta data cache
    -  INTEGER(SIZE_T), INTENT(OUT) :: rdcc_nelmts  ! Number of elements (objects)
    -                                               ! in the raw data chunk cache
    -  INTEGER(SIZE_T), INTENT(OUT) :: rdcc_nbytes  ! Total size of the raw data
    -                                               ! chunk cache, in bytes
    -  REAL, INTENT(OUT) :: rdcc_w0                 ! Preemption policy
    -  INTEGER, INTENT(OUT) :: hdferr               ! Error code
    -                                               ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_cache_f
    -	
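
    Example (added; not in the original entry): a C sketch reading only part of the cache configuration by passing NULL for the unneeded arguments; fapl is an assumption.

        #include "hdf5.h"

        static void show_chunk_cache(hid_t fapl)
        {
            size_t rdcc_nbytes = 0;
            double w0          = 0.0;

            /* element counts are not of interest here, so those pointers are NULL */
            H5Pget_cache(fapl, NULL, NULL, &rdcc_nbytes, &w0);
        }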
    - - -
    - - - -
    -
    -
    Name: H5Pget_chunk -
    Signature: -
    int H5Pget_chunk(hid_t plist, - int max_ndims, - hsize_t * dims - ) -
    Purpose: -
    Retrieves the size of chunks for the raw data of a chunked layout dataset. - -
    Description: -
    H5Pget_chunk retrieves the size of chunks for the - raw data of a chunked layout dataset. - This function is only valid for dataset creation property lists. - At most, max_ndims elements of dims - will be initialized. -
    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Identifier of property list to query.
      int max_ndims    IN: Size of the dims array.
      hsize_t * dimsOUT: Array to store the chunk dimensions.
    -
    Returns: -
    Returns the chunk dimensionality if successful; otherwise returns a negative value.
    Fortran90 Interface: h5pget_chunk_f -
    -
    -SUBROUTINE h5pget_chunk_f(prp_id, ndims, dims, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier 
    -  INTEGER, INTENT(IN) :: ndims          ! Number of chunk dimensions 
    -                                        ! to return
    -  INTEGER(HSIZE_T), DIMENSION(ndims), INTENT(OUT) :: dims    
    -                                        ! Array containing sizes of
    -                                        ! chunk dimensions
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! chunk rank on success 
    -                                        ! and -1 on failure
    -END SUBROUTINE h5pget_chunk_f
    -	
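
    Example (added; not in the original entry): a C sketch recovering the chunk dimensions from a dataset creation list; dcpl is an assumption.

        #include "hdf5.h"

        static int show_chunk(hid_t dcpl)
        {
            hsize_t dims[H5S_MAX_RANK];
            int     rank = H5Pget_chunk(dcpl, H5S_MAX_RANK, dims);

            /* rank is the chunk dimensionality, or negative if dcpl is not chunked */
            return rank;
        }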
    - - -
    - - - -
    -
    -
    Name: H5Pget_class -
    Signature: -
    H5P_class_t H5Pget_class(hid_t plist - ) -
    Purpose: -
    Returns the property list class for a property list. -
    Description: -
    H5Pget_class returns the property list class for the - property list identified by the plist parameter. - Valid property list classes are defined in the description of - H5Pcreate. -
    Parameters: -
      - - - -
      hid_t plist    IN: Identifier of property list to query.
    -
    Returns: -
    Returns a property list class if successful. - Otherwise returns H5P_NO_CLASS (-1). -
    Fortran90 Interface: h5pget_class_f -
    -
    -SUBROUTINE h5pget_class_f(prp_id, classtype, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier 
    -  INTEGER, INTENT(OUT) :: classtype    ! The type of the property list 
    -                                       ! to be created 
    -                                       ! Possible values are: 
    -                                       !    H5P_NO_CLASS_F  
    -                                       !    H5P_FILE_CREATE_F 
    -                                       !    H5P_FILE_ACCESS_F 
    -                                       !    H5P_DATASET_CREATE_F 
    -                                       !    H5P_DATASET_XFER_F
    -                                       !    H5P_MOUNT_F 
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_class_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_class_name - -
    Purpose: -
    Retrieves the name of a class. - -
    Signature: -
    char * H5Pget_class_name( - hid_t pcid - ) - -
    Description: -
    H5Pget_class_name retrieves the name of a - generic property list class. The pointer to the name - must be freed by the user after each successful call. - -
    Parameters: -
      - - - -
      hid_t pcid    IN: Identifier of the property class to query
    - -
    Returns: -
    Success: a pointer to an allocated string containing the class name -
    Failure: NULL - -
    Fortran90 Interface: h5pget_class_name_f -
    -
    -SUBROUTINE h5pget_class_name_f(prp_id, name, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id     ! Property list identifier to
    -                                           ! query 
    -  CHARACTER(LEN=*), INTENT(INOUT) :: name  ! Buffer to retrieve class name
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code, possible values:
    -                                           ! Success:  Actual length of the 
    -                                           ! class name
    -                                           ! If provided buffer "name" is 
    -                                           ! smaller, than name will be 
    -                                           ! truncated to fit into
    -                                           ! provided user buffer
    -                                           ! Failure: -1   
    -END SUBROUTINE h5pget_class_name_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_class_parent - -
    Signature: -
    hid_t H5Pget_class_parent( - hid_t pcid - ) - -
    Purpose: -
    Retrieves the parent class of a property class. - -
    Description: -
    H5Pget_class_parent retrieves an identifier for the - parent class of a property class. - -
    Parameters: -
      - - - -
      hid_t pcid    IN: Identifier of the property class to query
    - -
    Returns: -
    Success: a valid parent class object identifier -
    Failure: a negative value - -
    Fortran90 Interface: h5pget_class_parent_f -
    -
    -SUBROUTINE h5pget_class_parent_f(prp_id, parent_id, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id     ! Property list identifier 
    -  INTEGER(HID_T), INTENT(OUT) :: parent_id ! Parent class property list 
    -                                           ! identifier
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_class_parent_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_data_transform -
    Signature: -
    ssize_t H5Pget_data_transform - (hid_t plist_id, - char *expression, - size_t size) -
    Purpose: -
    Retrieves a data transform expression. -
    Description: -
    H5Pget_data_transform retrieves the data - transform expression previously set in the dataset transfer - property list plist_id by H5Pset_data_transform. -

    H5Pget_data_transform can be used to both - retrieve the transform expression and to query its size. -

    - If expression is non-NULL, up to size - bytes of the data transform expression are written to the buffer. - If expression is NULL, size is ignored - and the function does not write anything to the buffer. - The function always returns the size of the data transform expression. -

    - If 0 is returned for the size of the expression, - no data transform expression exists for the property list. -

    - If an error occurs, the buffer pointed to by expression - is unchanged and the function returns a negative value. -

    Parameters: -
      - - - - - - - - - -
      hid_t plist_idIN: Identifier of the property list or class
      char *expression  OUT: Pointer to memory where the transform - expression will be copied
      size_t sizeIN: Number of bytes of the transform expression - to copy to
    -
    Returns: -
    Success: size of the transform expression. -
    Failure: a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pget_driver -
    Signature: -
    hid_t H5Pget_driver( - hid_t plist_id - ) -
    Purpose: -
    Returns a low-level driver identifier.
    Description: -
    H5Pget_driver returns the identifier of the - low-level file driver associated with the file access property list - or data transfer property list plist_id. -

    - Valid driver identifiers with the standard HDF5 library distribution - include the following: -

    -           H5FD_CORE
    -           H5FD_FAMILY
    -           H5FD_GASS
    -           H5FD_LOG
    -           H5FD_MPIO
    -           H5FD_MULTI
    -           H5FD_SEC2
    -           H5FD_STDIO
    -           H5FD_STREAM 
    - If a user defines and registers custom drivers or - if additional drivers are defined in an HDF5 distribution, - this list will be longer. -

    - The returned driver identifier is only valid as long as the - file driver remains registered. -

    Parameters: -
      - - - -
      hid_t plist_id    IN: File access or data transfer property list identifier.
    -
    Returns: -
    Returns a valid low-level driver identifier if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_driver_f -
    -
    -SUBROUTINE h5pget_driver_f(prp_id, driver, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(OUT) :: driver       ! Low-level file driver identifier
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_driver_f
    -	
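
    Example (added; not in the original entry): a C sketch testing which driver a file access list selects; fapl is an assumption.

        #include "hdf5.h"

        static void which_driver(hid_t fapl)
        {
            hid_t drv = H5Pget_driver(fapl);

            if (drv == H5FD_SEC2) {
                /* the default sec2 (POSIX) driver */
            } else if (drv == H5FD_CORE) {
                /* the in-memory (core) driver */
            }
        }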
    - - -
    - - - - - -
    -
    -
    Name: H5Pget_dxpl_mpio -
    Signature: -
    herr_t H5Pget_dxpl_mpio( - hid_t dxpl_id, - H5FD_mpio_xfer_t *xfer_mode - ) -
    Purpose: -
    Returns the data transfer mode. -
    Description: -
    H5Pget_dxpl_mpio queries the data transfer mode - currently set in the data transfer property list dxpl_id. -

    - Upon return, xfer_mode contains the data transfer mode, - if it is non-null. -

    - H5Pget_dxpl_mpio is not a collective function. -

    Parameters: -
      - - - - - - -
      hid_t dxpl_idIN: Data transfer property list identifier.
      H5FD_mpio_xfer_t *xfer_mode    OUT: Data transfer mode.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_dxpl_mpio_f -
    -
    -SUBROUTINE h5pget_dxpl_mpio_f(prp_id, data_xfer_mode, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id    ! Property list identifier
    -  INTEGER, INTENT(OUT) :: data_xfer_mode  ! Data transfer mode
    -                                          ! Possible values are:
    -                                          !    H5FD_MPIO_INDEPENDENT_F
    -                                          !    H5FD_MPIO_COLLECTIVE_F
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_dxpl_mpio_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_dxpl_multi -
    Signature: -
    herr_t H5Pget_dxpl_multi( - hid_t dxpl_id, - const hid_t *memb_dxpl - ) -
    -
    -
    Purpose: -
    Returns multi-file data transfer property list information. -
    Description: -
    H5Pget_dxpl_multi returns the data transfer property list - information for the multi-file driver. -
    Parameters: -
      - - - - - - -
      hid_t dxpl_id,IN: Data transfer property list identifier.
      const hid_t *memb_dxpl    OUT: Array of data access property lists.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pget_edc_check -
    Signature: -
    H5Z_EDC_t H5Pget_edc_check(hid_t plist) -
    Purpose: -
    Determines whether error-detection is enabled for dataset reads. -
    Description: -
    H5Pget_edc_check queries the dataset transfer property - list plist to determine whether error detection - is enabled for data read operations. -
    Parameters: -
      - - - -
      hid_t plist    IN: Dataset transfer property list identifier.
    -
    Returns: -
    Returns H5Z_ENABLE_EDC or H5Z_DISABLE_EDC - if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_edc_check_f -
    -
    -SUBROUTINE h5pget_edc_check_f(prp_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Dataset transfer property list 
    -                                        ! identifier 
    -  INTEGER, INTENT(OUT)       :: flag    ! EDC flag; possible values
    -                                        !    H5Z_DISABLE_EDC_F 
    -                                        !    H5Z_ENABLE_EDC_F 
    -  INTEGER, INTENT(OUT)       :: hdferr  ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_edc_check_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_external -
    Signature: -
    herr_t H5Pget_external(hid_t plist, - unsigned idx, - size_t name_size, - char *name, - off_t *offset, - hsize_t *size - ) -
    Purpose: -
    Returns information about an external file. -
    Description: -
    H5Pget_external returns information about an external file. The external file is specified by its index, idx, which is a number from zero to N-1, where N is the value returned by H5Pget_external_count. At most name_size characters are copied into the name array. If the length of the external file name, including the null terminator, exceeds name_size, the returned name is not null-terminated (similar to strncpy()).

    - If name_size is zero or name is the - null pointer, the external file name is not returned. - If offset or size are null pointers - then the corresponding information is not returned. -

    Parameters: -
      - - - - - - - - - - - - - - - - - - -
      hid_t plistIN: Identifier of a dataset creation property list.
      unsigned idxIN: External file index.
      size_t name_size    IN: Maximum length of name array.
      char *nameOUT: Name of the external file.
      off_t *offsetOUT: Pointer to a location to return an offset value.
      hsize_t *sizeOUT: Pointer to a location to return the size of the - external file data.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_external_f -
    -
    -SUBROUTINE h5pget_external_f(prp_id, idx, name_size, name, offset, bytes, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id   ! Property list identifier
    -  INTEGER, INTENT(IN) :: idx             ! External file index.
    -  INTEGER, INTENT(IN) :: name_size       ! Maximum length of name array
    -  CHARACTER(LEN=*), INTENT(OUT) :: name  ! Name of an external file
    -  INTEGER, INTENT(OUT) :: offset         ! Offset, in bytes, from the 
    -                                         ! beginning of the file to the 
    -                                         ! location in the file where
    -                                         ! the data starts.
    -  INTEGER(HSIZE_T), INTENT(OUT) :: bytes ! Number of bytes reserved in 
    -                                         ! the file for the data
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_external_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_external_count -
    Signature: -
    int H5Pget_external_count(hid_t plist - ) -
    Purpose: -
    Returns the number of external files for a dataset. -
    Description: -
    H5Pget_external_count returns the number of external files - for the specified dataset. -
    Parameters: -
      - - - -
      hid_t plist    IN: Identifier of a dataset creation property list.
    -
    Returns: -
    Returns the number of external files if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_external_count_f -
    -
    -SUBROUTINE h5pget_external_count_f (prp_id, count, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(OUT) :: count        ! Number of external files for 
    -                                       ! the specified dataset
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_external_count_f
    -	
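
    Example (added; not in the original entry): a C sketch walking the external files attached to a dataset creation list with H5Pget_external_count and H5Pget_external; dcpl and the buffer size are assumptions.

        #include <sys/types.h>
        #include "hdf5.h"

        static void list_external(hid_t dcpl)
        {
            int count = H5Pget_external_count(dcpl);
            int i;

            for (i = 0; i < count; i++) {
                char    name[256];
                off_t   offset = 0;
                hsize_t size   = 0;

                H5Pget_external(dcpl, (unsigned)i, sizeof(name), name, &offset, &size);
                /* name may not be null-terminated if it did not fit in the buffer */
            }
        }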
    - - -
    - - - -
    -
    -
    Name: H5Pget_family_offset -
    Signature: -
    herr_t H5Pget_family_offset ( - hid_t fapl_id, - hsize_t *offset - ) -
    Purpose: -
    Retrieves a data offset from the file access property list. -
    Description: -
    H5Pget_family_offset retrieves the value of offset - from the file access property list fapl_id - so that the user application - can retrieve a file handle for low-level access to a particular member - of a family of files. The file handle is retrieved with a separate call - to H5Fget_vfd_handle - (or, in special circumstances, to H5FDget_vfd_handle; - see Virtual File Layer and List of VFL Functions - in HDF5 Technical Notes). -

    - The data offset returned in offset is the offset - of the data in the HDF5 file that is stored on disk in the selected - member file in a family of files. -

    - Use of this function is only appropriate for an HDF5 file written as a - family of files with the FAMILY file driver. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      hsize_t *offset    OUT: Offset in bytes within the HDF5 file.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_core -
    Signature: -
    herr_t H5Pget_fapl_core( - hid_t fapl_id, - size_t *increment, - hbool_t *backing_store - ) -
    Purpose: -
    Queries core file driver properties. -
    Description: -
    H5Pget_fapl_core queries the H5FD_CORE - driver properties as set by H5Pset_fapl_core. -
    Parameters: -
      - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      size_t *incrementOUT: Size, in bytes, of memory increments.
      hbool_t *backing_store    OUT: Boolean flag indicating whether to write the file - contents to disk when the file is closed.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_fapl_core_f -
    -
    -SUBROUTINE h5pget_fapl_core_f(prp_id, increment, backing_store, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)  :: prp_id     ! Property list identifier
    -  INTEGER(SIZE_T), INTENT(OUT) :: increment ! File block size in bytes
    -  LOGICAL, INTENT(OUT) :: backing_store     ! Flag to indicate that entire
    -                                            ! file contents are flushed to 
    -                                            ! a file with the same name as 
    -                                            ! this core file
    -  INTEGER, INTENT(OUT) :: hdferr            ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_fapl_core_f
    -	
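
    Example (added; not in the original entry): a C sketch configuring the core (in-memory) driver and reading the settings back; the increment value is an assumption.

        #include "hdf5.h"

        static void core_settings(void)
        {
            hid_t   fapl          = H5Pcreate(H5P_FILE_ACCESS);
            size_t  increment     = 0;
            hbool_t backing_store = 0;

            H5Pset_fapl_core(fapl, 64 * 1024, 1);   /* 64 KB increments; flush to disk on close */
            H5Pget_fapl_core(fapl, &increment, &backing_store);
            H5Pclose(fapl);
        }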
    - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_family -
    Signature: -
    herr_t H5Pget_fapl_family ( - hid_t fapl_id, - hsize_t *memb_size, - hid_t *memb_fapl_id - ) -
    Purpose: -
    Returns file access property list information. -
    Description: -
    H5Pget_fapl_family returns file access property list - for use with the family driver. - This information is returned through the output parameters. -
    Parameters: -
      - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      hsize_t *memb_sizeOUT: Size in bytes of each file member.
      hid_t *memb_fapl_id    OUT: Identifier of file access property list for each - family member.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_fapl_family_f -
    -
    -SUBROUTINE h5pget_fapl_family_f(prp_id, memb_size, memb_plist, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)    :: prp_id    ! Property list identifier
    -  INTEGER(HSIZE_T), INTENT(OUT) :: memb_size ! Logical size, in bytes,
    -                                             ! of each family member
    -  INTEGER(HID_T), INTENT(OUT) :: memb_plist  ! Identifier of the file 
    -                                             ! access property list to be
    -                                             ! used for each family member
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -                                             ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_fapl_family_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_gass -
    Signature: -
    herr_t H5Pget_fapl_gass( - hid_t fapl_id, - GASS_Info *info - ) -
    Purpose: -
    Retrieves GASS information. -
    Description: -
    If the file access property list fapl_id is set - for use of the H5FD_GASS driver, - H5Pget_fapl_gass returns the GASS_Info - object through the info pointer. -

    - The GASS_Info information is copied, so it is valid - only until the file access property list is modified or closed. -

    Note: -
    H5Pget_fapl_gass is an experimental function. It is designed for use only when accessing files via the GASS facility of the Globus environment. For further information, see http://www.globus.org/.
    Parameters: -
      - - - - - - -
      hid_t fapl_id,IN: File access property list identifier.
      GASS_Info *info    OUT: Pointer to the GASS information structure.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_mpio -
    Signature: -
    herr_t H5Pget_fapl_mpio( - hid_t fapl_id, - MPI_Comm *comm, - MPI_Info *info - ) -
    Purpose: -
    Returns MPI communicator information. -
    Description: -
    If the file access property list is set to the H5FD_MPIO - driver, H5Pget_fapl_mpio returns the MPI communicator and - information through the comm and info - pointers, if those values are non-null. -

    - Neither comm nor info is copied, - so they are valid only until the file access property list - is either modified or closed. -

    Parameters: -
      - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      MPI_Comm *comm    OUT: MPI-2 communicator.
      MPI_Info *infoOUT: MPI-2 info object.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_fapl_mpio_f -
    -
    -SUBROUTINE h5pget_fapl_mpio_f(prp_id, comm, info, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(OUT) :: comm         ! Buffer to return communicator 
    -  INTEGER, INTENT(IN) :: info          ! Buffer to return info object as
    -                                       ! defined in MPI_FILE_OPEN of MPI-2
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_fapl_mpio_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_mpiposix -
    Signature: -
    herr_t H5Pget_fapl_mpiposix( - hid_t fapl_id, - MPI_Comm *comm - ) -
    Purpose: -
    Returns MPI communicator information. -
    Description: -
    If the file access property list is set to the H5FD_MPIPOSIX driver, H5Pget_fapl_mpiposix returns the MPI communicator through the comm pointer, if that value is non-null.

    - comm is not copied, so it is valid only - until the file access property list is either modified or closed. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      MPI_Comm *comm    OUT: MPI-2 communicator.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_fapl_mpiposix_f -
    -
    -SUBROUTINE h5pget_fapl_mpiposix_f(prp_id, comm, use_gpfs, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(OUT) :: comm          ! Buffer to return communicator
    -  LOGICAL, INTENT(OUT) :: use_gpfs     ! Flag indicating whether GPFS hints are used
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5pget_fapl_mpiposix_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_multi -
    Signature: -
    herr_t H5Pget_fapl_multi( - hid_t fapl_id, - const H5FD_mem_t *memb_map, - const hid_t *memb_fapl, - const char **memb_name, - const haddr_t *memb_addr, - hbool_t *relax - ) -
    Purpose: -
    Returns information about the multi-file access property list. -
    Description: -
    H5Pget_fapl_multi returns information about the - multi-file access property list. -
    Parameters: -
      - - - - - - - - - - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      const H5FD_mem_t *memb_map    OUT: Maps memory usage types to other memory usage types.
      const hid_t *memb_faplOUT: Property list for each memory usage type.
      const char **memb_nameOUT: Name generator for names of member files.
      const haddr_t *memb_addrOUT:
      hbool_t *relaxOUT: Allows read-only access to incomplete file sets - when TRUE.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_fapl_multi_f -
    -
    -SUBROUTINE h5pget_fapl_multi_f(prp_id, memb_map, memb_fapl, memb_name,
    -                               memb_addr, relax, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T),INTENT(IN)   :: prp_id    ! Property list identifier
    -
    -  INTEGER,DIMENSION(0:H5FD_MEM_NTYPES_F-1),INTENT(OUT)          :: memb_map
    -  INTEGER(HID_T),DIMENSION(0:H5FD_MEM_NTYPES_F-1),INTENT(OUT)   :: memb_fapl
    -  CHARACTER(LEN=*),DIMENSION(0:H5FD_MEM_NTYPES_F-1),INTENT(OUT) :: memb_name
    -  REAL, DIMENSION(0:H5FD_MEM_NTYPES_F-1), INTENT(OUT)           :: memb_addr
    -               ! Numbers in the interval [0,1) (e.g. 0.0 0.1 0.5 0.2 0.3 0.4)
    -               ! real address in the file will be calculated as X*HADDR_MAX 
    -
    -  LOGICAL, INTENT(OUT) :: relax
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_fapl_multi_f
    -	
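    A minimal C sketch (added for illustration; not part of the original entry) showing how the multi-driver settings might be read back. The identifier fapl and the helper name print_multi_settings are assumed; arrays are dimensioned by H5FD_MEM_NTYPES, as the signature above implies.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: print the member-file settings of a file access property
     * list assumed to have been configured with H5Pset_fapl_multi. */
    static void print_multi_settings(hid_t fapl)
    {
        H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
        hid_t      memb_fapl[H5FD_MEM_NTYPES];
        char      *memb_name[H5FD_MEM_NTYPES];
        haddr_t    memb_addr[H5FD_MEM_NTYPES];
        hbool_t    relax;
        int        mt;

        if (H5Pget_fapl_multi(fapl, memb_map, memb_fapl, memb_name,
                              memb_addr, &relax) < 0) {
            fprintf(stderr, "H5Pget_fapl_multi failed\n");
            return;
        }
        for (mt = 0; mt < H5FD_MEM_NTYPES; mt++)
            printf("usage type %d: name pattern %s, start address %llu\n",
                   mt,
                   memb_name[mt] ? memb_name[mt] : "(none)",
                   (unsigned long long)memb_addr[mt]);
        /* Depending on the library version, the name strings returned in
         * memb_name may be allocated by the library and may need to be
         * freed by the caller. */
    }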
    - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_srb -
    Signature: -
    herr_t H5Pget_fapl_srb( - hid_t fapl_id, - SRB_Info *info - ) -
    Purpose: -
    Retrieves SRB information. -
    Description: -
    If the file access property list fapl_id is set - for use of the H5FD_SRB driver, - H5Pget_fapl_srb returns the SRB_Info - object through the info pointer. -

    - The SRB_Info information is not copied, so it is valid only until the file access property list is modified or closed.

    Note: -
    H5Pget_fapl_srb is an experimental function. It is designed for use only when accessing files via the Storage Resource Broker (SRB). For further information, see http://www.npaci.edu/Research/DI/srb/.
    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      SRB_Info *info    OUT: Pointer to the SRB information structure.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - -
    - - - -
    -
    -
    Name: H5Pget_fapl_stream -
    Signature: -
    herr_t H5Pget_fapl_stream( - hid_t fapl_id, - H5FD_stream_fapl_t *fapl - ) -
    Purpose: -
    Returns the streaming I/O driver settings. -
    Description: -
    H5Pget_fapl_stream returns the file access properties - set for the use of the streaming I/O driver. -

    - H5Pset_fapl_stream and H5Pget_fapl_stream - are not intended for use in a parallel environment. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      H5FD_stream_fapl_t *fapl    OUT: The streaming I/O file access property list.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - -
    - - - -
    -
    -
    Name: H5Pget_fclose_degree -
    Signature: -
    herr_t H5Pget_fclose_degree(hid_t fapl_id, - H5F_close_degree_t *fc_degree) -
    Purpose: -
    Returns the file close degree. -
    Description: -
    H5Pget_fclose_degree returns the current setting of the file - close degree property fc_degree in the file access property list - fapl_id.  -

    The value of fc_degree determines how aggressively H5Fclose - deals with objects within a file that remain open when H5Fclose - is called to close that file.  fc_degree can have any one of - four valid values as described above in H5Pset_fclose_degree. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      H5F_close_degree_t *fc_degree    OUT: Pointer to a location to which to return the file close degree - property, the value of fc_degree.
    -
    Returns: -
    Returns a non-negative value if successful. Otherwise returns a negative - value. -
    Fortran90 Interface: h5pget_fclose_degree_f -
    -
    -SUBROUTINE h5pget_fclose_degree_f(fapl_id, degree, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: fapl_id ! File access property list identifier
    -  INTEGER, INTENT(OUT) :: degree        ! Info about file close behavior
    -                                        ! Possible values:
    -                                        !    H5F_CLOSE_DEFAULT_F
    -                                        !    H5F_CLOSE_WEAK_F
    -                                        !    H5F_CLOSE_SEMI_F
    -                                        !    H5F_CLOSE_STRONG_F
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_fclose_degree_f
    -	
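    A short C sketch (added for illustration; not part of the original entry) that reports the close-degree setting. The identifier fapl and the helper name report_fclose_degree are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: report the close degree stored in a file access
     * property list. */
    static void report_fclose_degree(hid_t fapl)
    {
        H5F_close_degree_t degree;

        if (H5Pget_fclose_degree(fapl, &degree) < 0) {
            fprintf(stderr, "H5Pget_fclose_degree failed\n");
            return;
        }
        switch (degree) {
            case H5F_CLOSE_WEAK:   printf("close degree: WEAK\n");    break;
            case H5F_CLOSE_SEMI:   printf("close degree: SEMI\n");    break;
            case H5F_CLOSE_STRONG: printf("close degree: STRONG\n");  break;
            default:               printf("close degree: DEFAULT\n"); break;
        }
    }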
    - - -
    - - - -
    -
    -
    Name: H5Pget_fill_time -
    Signature: -
    herr_t H5Pget_fill_time(hid_t plist_id, - H5D_fill_time_t *fill_time - ) -
    Purpose: -
    Retrieves the time when fill values are written to a dataset.
    Description: -
    H5Pget_fill_time examines the dataset creation - property list plist_id to determine when fill values - are to be written to a dataset. -

    - Valid values returned in fill_time are as follows: - -
         - H5D_FILL_TIME_IFSET   - - Fill values are written to the dataset when storage space is allocated - only if there is a user-defined fill value, i.e., one set with - H5Pset_fill_value. -   (Default) -
      - H5D_FILL_TIME_ALLOC   - - Fill values are written to the dataset when storage space is allocated. -
      - H5D_FILL_TIME_NEVER - - Fill values are never written to the dataset. -
    -

    Note: -
    H5Pget_fill_time is designed to work in coordination - with the dataset fill value and - dataset storage allocation time properties, retrieved with the functions - H5Pget_fill_value and H5Pget_alloc_time. -
    Parameters: -
      - - - - - - -
      hid_t plist_idIN: Dataset creation property list identifier.
      H5D_fill_time_t *fill_time    OUT: Setting for the timing of writing fill values to the dataset.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_fill_time_f -
    -
    -SUBROUTINE h5pget_fill_time_f(plist_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! Dataset creation property 
    -                                         ! list identifier
    -  INTEGER(HSIZE_T), INTENT(OUT) :: flag  ! Fill time flag
    -                                         ! Possible values are:
    -                                         !    H5D_FILL_TIME_ERROR_F
    -                                         !    H5D_FILL_TIME_ALLOC_F
    -                                         !    H5D_FILL_TIME_NEVER_F
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_fill_time_f
    -	
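    A short C sketch (added for illustration; not part of the original entry) that branches on the fill-time setting. The identifier dcpl and the helper name report_fill_time are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: report when fill values will be written for datasets
     * created with this dataset creation property list. */
    static void report_fill_time(hid_t dcpl)
    {
        H5D_fill_time_t fill_time;

        if (H5Pget_fill_time(dcpl, &fill_time) < 0)
            return;
        if (fill_time == H5D_FILL_TIME_NEVER)
            printf("fill values will never be written\n");
        else if (fill_time == H5D_FILL_TIME_ALLOC)
            printf("fill values written when storage is allocated\n");
        else
            printf("fill values written only if user-defined (IFSET)\n");
    }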
    - - -
    - - - -
    -
    -
    Name: H5Pget_fill_value -
    Signature: -
    herr_t H5Pget_fill_value(hid_t plist_id, - hid_t type_id, - void *value - ) -
    Purpose: -
    Retrieves a dataset fill value. -
    Description: -
    H5Pget_fill_value returns the dataset - fill value defined in the dataset creation property list - plist_id. -

    - The fill value is returned through the value - pointer and will be converted to the datatype specified - by type_id. - This datatype may differ from the - fill value datatype in the property list, - but the HDF5 library must be able to convert between the - two datatypes. -

    - If the fill value is undefined, - i.e., set to NULL in the property list, - H5Pget_fill_value will return an error. - H5Pfill_value_defined should be used to - check for this condition before - H5Pget_fill_value is called. -

    - Memory must be allocated by the calling application. -

    Note: -
    H5Pget_fill_value is designed to coordinate - with the dataset storage allocation time and - fill value write time properties, which can be retrieved - with the functions H5Pget_alloc_time - and H5Pget_fill_time, respectively. - -
    Parameters: -
      - - - - - - - - -
      hid_t plist_id    IN: Dataset creation property list identifier.
      hid_t type_id    IN: Datatype identifier for the value passed via value.
      void *valueOUT: Pointer to buffer to contain the returned fill value.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_fill_value_f -
    -
    -SUBROUTINE h5pget_fill_value_f(prp_id, type_id, fillvalue, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier 
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier of fill
    -                                        ! value datatype (in memory) 
    -  TYPE(VOID), INTENT(OUT) :: fillvalue  ! Fillvalue
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    - 
    -END SUBROUTINE h5pget_fill_value_f
    -	
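    A short C sketch (added for illustration; not part of the original entry) that follows the recommendation above: check with H5Pfill_value_defined before calling H5Pget_fill_value. The identifier dcpl, the helper name read_fill_value, and the use of a native int fill value are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: retrieve a dataset fill value only if one is defined. */
    static void read_fill_value(hid_t dcpl)
    {
        H5D_fill_value_t status;
        int              fill = 0;

        if (H5Pfill_value_defined(dcpl, &status) < 0)
            return;
        if (status == H5D_FILL_VALUE_UNDEFINED) {
            printf("no fill value defined\n");
            return;
        }
        /* Memory for the returned value is allocated by the caller. */
        if (H5Pget_fill_value(dcpl, H5T_NATIVE_INT, &fill) >= 0)
            printf("fill value = %d\n", fill);
    }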
    - - -
    - - - -
    -
    -
    Name: H5Pget_filter -
    Signature: -
    H5Z_filter_t H5Pget_filter(hid_t plist, - int filter_number, - unsigned int *flags, - size_t *cd_nelmts, - unsigned int *cd_values, - size_t namelen, - char name[] - ) -
    Purpose: -
    Returns information about a filter in a pipeline. -
    Description: -
    H5Pget_filter returns information about a - filter, specified by its filter number, in a filter pipeline, - specified by the property list with which it is associated. -

    - If plist is a dataset creation property list, - the pipeline is a permanent filter pipeline; - if plist is a dataset transfer property list, - the pipeline is a transient filter pipeline. -

    - On input, cd_nelmts indicates the number of entries in the cd_values array, as allocated by the caller; on return, cd_nelmts contains the number of values defined by the filter.

    - filter_number is a value between zero and - N-1, as described in - H5Pget_nfilters. - The function will return a negative value if the filter number - is out of range. -

    - If name is a pointer to an array of at least - namelen bytes, the filter name will be copied - into that array. The name will be null terminated if - namelen is large enough. The filter name returned - will be the name appearing in the file, the name registered - for the filter, or an empty string. -

    - The structure of the flags argument is discussed - in H5Pset_filter. -

    Note: -
    This function currently supports only the permanent filter - pipeline; plist must be a dataset creation property - list. -
    Parameters: -
      - - - - - - - - - - - - - - - - - - - - - -
      hid_t plistIN: Property list identifier.
      int filter_numberIN: Sequence number within the filter pipeline of - the filter for which information is sought.
      unsigned int *flagsOUT: Bit vector specifying certain general properties - of the filter.
      size_t *cd_nelmtsIN/OUT: Number of elements in cd_values.
      unsigned int *cd_values    OUT: Auxiliary data for the filter.
      size_t namelenIN: Anticipated number of characters in name.
      char name[]OUT: Name of the filter.
    - -
    Returns: -
    Returns the filter identifier if successful: -
    - - - - - -
    - H5Z_FILTER_DEFLATE - - Data compression filter, employing the gzip algorithm -
    - H5Z_FILTER_SHUFFLE - - Data shuffling filter -
    - H5Z_FILTER_FLETCHER32   - - Error detection filter, employing the Fletcher32 checksum algorithm -
    - H5Z_FILTER_SZIP - - Data compression filter, employing the SZIP algorithm -
    -
    - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_filter_f -
    -
    -SUBROUTINE h5pget_filter_f(prp_id, filter_number, flags, cd_nelmts, &
    -                           cd_values, namelen, name, filter_id, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id    ! Property list identifier
    -  INTEGER, INTENT(IN) :: filter_number    ! Sequence number within the filter
    -                                          ! pipeline of the filter for which
    -                                          ! information is sought
    -  INTEGER, DIMENSION(*), INTENT(OUT) :: cd_values  
    -                                          ! Auxiliary data for the filter
    -  INTEGER, INTENT(OUT) :: flags           ! Bit vector specifying certain 
    -                                          ! general properties of the filter
    -  INTEGER(SIZE_T), INTENT(INOUT) :: cd_nelmts      
    -                                          ! Number of elements in cd_values
    -  INTEGER(SIZE_T), INTENT(IN) :: namelen  ! Anticipated number of characters 
    -                                          ! in name
    -  CHARACTER(LEN=*), INTENT(OUT) :: name   ! Name of the filter
    -  INTEGER, INTENT(OUT) :: filter_id       ! Filter identification number
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_filter_f
    -	
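    A C sketch (added for illustration; not part of the original entry) that walks the permanent filter pipeline using H5Pget_nfilters together with the H5Pget_filter signature documented above. The identifier dcpl, the helper name list_filters, and the buffer sizes are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: list every filter in a dataset creation property list's
     * permanent pipeline. */
    static void list_filters(hid_t dcpl)
    {
        int nfilters = H5Pget_nfilters(dcpl);
        int i;

        for (i = 0; i < nfilters; i++) {
            unsigned int flags;
            unsigned int cd_values[8];
            size_t       cd_nelmts = 8;   /* entries allocated in cd_values */
            char         name[64];
            H5Z_filter_t filter;

            filter = H5Pget_filter(dcpl, i, &flags, &cd_nelmts,
                                   cd_values, sizeof(name), name);
            if (filter < 0)
                continue;
            printf("filter %d: id=%d, name=%s, %lu client values\n",
                   i, (int)filter, name, (unsigned long)cd_nelmts);
        }
    }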
    - - -
    - - - -
    -
    -
    Name: H5Pget_filter_by_id -
    Signature: -
    herr_t H5Pget_filter_by_id( - hid_t plist_id, - H5Z_filter_t filter, - unsigned int *flags, - size_t *cd_nelmts, - unsigned int cd_values[], - size_t namelen, - char name[] - ) -
    Purpose: -
    Returns information about the specified filter. -
    Description: -
    H5Pget_filter_by_id returns information about the - filter specified in filter, a filter identifier. -

    - plist_id must identify a dataset creation property list - and filter will be in a permanent filter pipeline. -

    - The filter and flags parameters are used - in the same manner as described in the discussion of - H5Pset_filter. -

    - Aside from the fact that they are used for output, the - parameters cd_nelmts and cd_values[] are - used in the same manner as described in the discussion - of H5Pset_filter. - On input, the cd_nelmts parameter indicates the - number of entries in the cd_values[] array - allocated by the calling program; on exit it contains the - number of values defined by the filter. -

    - On input, the namelen parameter indicates the number of characters allocated for the filter name by the calling program in the array name[]. On exit, it contains the length in characters of the filter name. On exit, name[] contains the name of the filter with one character of the name in each element of the array.

    - If the filter specified in filter is not - set for the property list, an error will be returned - and H5Pget_filter_by_id will fail. -

    Parameters: -
      - - - - - - - - - - - - - - - - - - - - - -
      hid_t plist_idIN: Property list identifier.
      H5Z_filter_t filterIN: Filter identifier.
      unsigned int *flagsOUT: Bit vector specifying certain general properties of the filter.
      size_t *cd_nelmtsIN/OUT: Number of elements in cd_values.
      unsigned int cd_values[]    OUT: Auxiliary data for the filter.
      size_t namelenIN: Anticipated number of characters in name.
      char name[]OUT: Name of filter.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. - -
    Fortran90 Interface: h5pget_filter_by_id_f -
    -
    -SUBROUTINE h5pget_filter_by_id_f(prp_id, filter_id, flags, cd_nelmts, &
    -                                 cd_values, namelen, name, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id      ! Property list identifier
    -  INTEGER, INTENT(IN)        :: filter_id   ! Filter identifier
    -  INTEGER(SIZE_T), INTENT(INOUT)     :: cd_nelmts  
    -                                            ! Number of elements in cd_values
    -  INTEGER, DIMENSION(*), INTENT(OUT) :: cd_values  
    -                                            ! Auxiliary data for the filter
    -  INTEGER, INTENT(OUT)          :: flags    ! Bit vector specifying certain 
    -                                            ! general properties of the filter
    -  INTEGER(SIZE_T), INTENT(IN)   :: namelen  ! Anticipated number of characters 
    -                                            ! in name
    -  CHARACTER(LEN=*), INTENT(OUT) :: name     ! Name of the filter
    -  INTEGER, INTENT(OUT)          :: hdferr   ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_filter_by_id_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_gc_references -
    Signature: -
    herr_t H5Pget_gc_references(hid_t plist, - unsigned *gc_ref - ) -
    Purpose: -
    Returns garbage collecting references setting. -
    Description: -
    H5Pget_gc_references returns the current setting - for the garbage collection references property from - the specified file access property list. - The garbage collection references property is set - by H5Pset_gc_references. -
    Parameters: -
      - - - - - - -
      hid_t plistIN: File access property list identifier.
      unsigned *gc_ref    OUT: Flag returning the state of reference garbage collection. A returned value of 1 indicates that garbage collection is on while 0 indicates that garbage collection is off.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_gc_references_f -
    -
    -SUBROUTINE h5pget_gc_references_f (prp_id, gc_reference, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(OUT) :: gc_reference ! The flag for garbage collecting
    -                                       ! references for the file
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_gc_references_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_hyper_vector_size -
    Signature: -
    herr_t H5Pget_hyper_vector_size(hid_t dxpl_id, - size_t *vector_size - ) -
    Purpose: -
    Retrieves number of I/O vectors to be read/written in hyperslab I/O. -
    Description: -
    H5Pget_hyper_vector_size retrieves the number of I/O vectors to be accumulated in memory before being issued to the lower levels of the HDF5 library for reading or writing the actual data.

    - The number of I/O vectors set in the dataset transfer property list - dxpl_id is returned in vector_size. - Unless the default value is in use, vector_size - was previously set with a call to - H5Pset_hyper_vector_size. -

    Parameters: -
      - - - - - - -
      hid_t dxpl_idIN: Dataset transfer property list identifier.
      size_t *vector_size    OUT: Number of I/O vectors to accumulate in memory for I/O operations.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_hyper_vector_size_f -
    -
    -SUBROUTINE h5pget_hyper_vector_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! Dataset transfer property list 
    -                                         ! identifier
    -  INTEGER(SIZE_T), INTENT(OUT) :: size   ! Vector size 
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_hyper_vector_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_istore_k -
    Signature: -
    herr_t H5Pget_istore_k(hid_t plist, - unsigned * ik - ) -
    Purpose: -
    Queries the 1/2 rank of an indexed storage B-tree. -
    Description: -
    H5Pget_istore_k queries the 1/2 rank of - an indexed storage B-tree. - The argument ik may be the null pointer (NULL). - This function is only valid for file creation property lists. -

    - See H5Pset_istore_k for details. -

    Parameters: -
      - - - - - - -
      hid_t plist    IN: Identifier of property list to query.
      unsigned * ikOUT: Pointer to location to return the chunked storage B-tree 1/2 rank.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_istore_k_f -
    -
    -SUBROUTINE h5pget_istore_k_f(prp_id, ik, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(OUT) :: ik           ! 1/2 rank of chunked storage B-tree
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_istore_k_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_layout -
    Signature: -
    H5D_layout_t H5Pget_layout(hid_t plist) -
    Purpose: -
    Returns the layout of the raw data for a dataset. -
    Description: -
    H5Pget_layout returns the layout of the raw data for - a dataset. This function is only valid for dataset creation - property lists. -

    - Note that a compact storage layout may affect writing data to - the dataset with parallel applications. See note in - H5Dwrite - documentation for details. - -

    Parameters: -
      - - - -
      hid_t plist    IN: Identifier for property list to query.
    -
    Returns: -
    Returns the layout type (a non-negative value) - of a dataset creation property list if successful. - Valid return values are: -
      -
      H5D_COMPACT -
      Raw data is stored in the object header in the file. -
      H5D_CONTIGUOUS -
      Raw data is stored separately from the object header in - one contiguous chunk in the file. -
      H5D_CHUNKED -
      Raw data is stored separately from the object header in - chunks in separate locations in the file. -
    -

    - Otherwise, returns a negative value indicating failure. -

    Fortran90 Interface: h5pget_layout_f -
    -
    -SUBROUTINE h5pget_layout_f (prp_id, layout, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(OUT) :: layout       ! Type of storage layout for raw data
    -                                       ! possible values are:
    -                                       !    H5D_COMPACT_F
    -                                       !    H5D_CONTIGUOUS_F
    -                                       !    H5D_CHUNKED_F
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_layout_f 
    -	
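    A short C sketch (added for illustration; not part of the original entry) that branches on the returned layout. The identifier dcpl and the helper name describe_layout are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: report the raw-data layout recorded in a dataset
     * creation property list. */
    static void describe_layout(hid_t dcpl)
    {
        switch (H5Pget_layout(dcpl)) {
            case H5D_COMPACT:    printf("compact layout\n");      break;
            case H5D_CONTIGUOUS: printf("contiguous layout\n");   break;
            case H5D_CHUNKED:    printf("chunked layout\n");      break;
            default:             printf("layout query failed\n"); break;
        }
    }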
    - - -
    - - - - - - - -
    -
    -
    Name: H5Pget_meta_block_size -
    Signature: -
    herr_t H5Pget_meta_block_size( - hid_t fapl_id, - hsize_t *size - ) -
    Purpose: -
    Returns the current metadata block size setting. -
    Description: -
    H5Pget_meta_block_size returns the current - minimum size, in bytes, of new metadata block allocations. - This setting is retrieved from the file access property list - fapl_id. -

    - This value is set by - H5Pset_meta_block_size - and is retrieved from the file access property list - fapl_id. -

    Parameters: -
      - - - - - - -
      hid_t fapl_id    IN: File access property list identifier.
      hsize_t *sizeOUT: Minimum size, in bytes, of metadata block allocations.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_meta_block_size_f -
    -
    -SUBROUTINE h5pget_meta_block_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! File access property list 
    -                                         ! identifier
    -  INTEGER(HSIZE_T), INTENT(OUT) :: size  ! Metadata block size
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_meta_block_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_multi_type -
    Signature: -
    herr_t H5Pget_multi_type( hid_t fapl_id, H5FD_mem_t *type )
    Purpose: -
    Retrieves data type property for MULTI driver. -
    Description: -
    H5Pget_multi_type retrieves the data type setting from the - file access or data transfer property list fapl_id. - This enables a user application to specify the type of data the - application wishes to access so that the application - can retrieve a file handle for low-level access to the particular member - of a set of MULTI files in which that type of data is stored. - The file handle is retrieved with a separate call - to H5Fget_vfd_handle - (or, in special circumstances, to H5FDget_vfd_handle; - see Virtual File Layer and List of VFL Functions - in HDF5 Technical Notes). -

    - The type of data returned in type will be one of those listed in the discussion of the type parameter in the description of the function H5Pset_multi_type.

    - Use of this function is only appropriate for an HDF5 file written - as a set of files with the MULTI file driver. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list or data transfer property list identifier.
      H5FD_mem_t *type    OUT: Type of data.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
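    A short C sketch (added for illustration; not part of the original entry) that checks the current data-type setting on a MULTI file's access property list. The identifier fapl and the helper name check_multi_type are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: see which member of a MULTI file set the property list
     * currently selects for low-level access. */
    static void check_multi_type(hid_t fapl)
    {
        H5FD_mem_t mtype;

        if (H5Pget_multi_type(fapl, &mtype) >= 0 &&
            mtype == H5FD_MEM_DRAW)
            printf("raw-data member currently selected\n");
    }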
    - - - -
    -
    -
    Name: H5Pget_nfilters -
    Signature: -
    int H5Pget_nfilters(hid_t plist) -
    Purpose: -
    Returns the number of filters in the pipeline. -
    Description: -
    H5Pget_nfilters returns the number of filters - defined in the filter pipeline associated with the property list - plist. -

    - In each pipeline, the filters are numbered from - 0 through N-1, where N is the value returned - by this function. During output to the file, the filters are - applied in increasing order; during input from the file, they - are applied in decreasing order. -

    - H5Pget_nfilters returns the number of filters - in the pipeline, including zero (0) if there - are none. -

    Note: -
    This function currently supports only the permanent filter - pipeline; plist_id must be a dataset creation - property list. -
    Parameters: -
      - - - -
      hid_t plist    IN: Property list identifier.
    -
    Returns: -
    Returns the number of filters in the pipeline if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_nfilters_f -
    -
    -SUBROUTINE h5pget_nfilters_f(prp_id, nfilters, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id   ! Dataset creation property 
    -                                         ! list identifier 
    -  INTEGER, INTENT(OUT) :: nfilters       ! The number of filters in 
    -                                         ! the pipeline
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_nfilters_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_nprops - -
    Signature: -
    int H5Pget_nprops( - hid_t id, - size_t *nprops - ) - -
    Purpose: -
    Queries number of properties in property list or class. - -
    Description: -
    H5Pget_nprops retrieves the number of properties in a - property list or class. - If a property class identifier is given, the number of registered - properties in the class is returned in nprops. - If a property list identifier is given, the current number of - properties in the list is returned in nprops. - -
    Parameters: -
      - - - - - - -
      hid_t idIN: Identifier of property object to query
      size_t *nprops    OUT: Number of properties in object
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value - -
    Fortran90 Interface: h5pget_nprops_f -
    -
    -SUBROUTINE h5pget_nprops_f(prp_id, nprops, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id    ! Property list identifier 
    -  INTEGER(SIZE_T), INTENT(OUT) :: nprops  ! Number of properties
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_nprops_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_preserve -
    Signature: -
    int H5Pget_preserve(hid_t plist) -
    Purpose: -
    Checks status of the dataset transfer property list. -
    Description: -
    H5Pget_preserve checks the status of the - dataset transfer property list. -
    Parameters: -
      - - - -
      hid_t plist    IN: Identifier for the dataset transfer property list.
    -
    Returns: -
    Returns TRUE or FALSE if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_preserve_f -
    -
    -SUBROUTINE h5pget_preserve_f(prp_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id   ! Dataset transfer property 
    -                                         ! list identifier 
    -  LOGICAL, INTENT(OUT)       :: flag     ! Status of the dataset 
    -                                         ! transfer property list 
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_preserve_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_sieve_buf_size -
    Signature: -
    herr_t H5Pget_sieve_buf_size( - hid_t fapl_id, - hsize_t *size - ) -
    Purpose: -
    Returns maximum data sieve buffer size. -
    Description: -
    H5Pget_sieve_buf_size retrieves, in size, the current maximum size of the data sieve buffer.

    - This value is set by - H5Pset_sieve_buf_size - and is retrieved from the file access property list - fapl_id. -

    Parameters: -
      - - - - - - -
      hid_t fapl_id    IN: File access property list identifier.
      hsize_t *sizeOUT: Maximum size, in bytes, of data sieve buffer.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pget_sieve_buf_size_f -
    -
    -SUBROUTINE h5pget_sieve_buf_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! File access property list 
    -                                         ! identifier
    -  INTEGER(SIZE_T), INTENT(OUT) :: size   ! Sieve buffer size 
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_sieve_buf_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_size - -
    Signature: -
    int H5Pget_size( - hid_t id, - const char *name, - size_t *size - ) - -
    Purpose: -
    Queries the size of a property value in bytes. - -
    Description: -
    H5Pget_size retrieves the size of a property's value in bytes. This function operates on both property lists and property classes.

    - Zero-sized properties are allowed and return 0. - - -

    Parameters: -
      - - - - - - - - - -
      hid_t idIN: Identifier of property object to query
      const char *name    IN: Name of property to query
      size_t *sizeOUT: Size of property in bytes
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value - -
    Fortran90 Interface: h5pget_size_f -
    -
    -SUBROUTINE h5pget_size_f(prp_id, name, size, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of property to query
    -  INTEGER(SIZE_T), INTENT(OUT) :: size  ! Size in bytes
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_sizes -
    Signature: -
    herr_t H5Pget_sizes(hid_t plist, - size_t * sizeof_addr, - size_t * sizeof_size - ) -
    Purpose: -
    Retrieves the size of the offsets and lengths used in an HDF5 file. -
    Description: -
    H5Pget_sizes retrieves the size of the offsets - and lengths used in an HDF5 file. - This function is only valid for file creation property lists. -
    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Identifier of property list to query.
      size_t * sizeof_addr    OUT: Pointer to location to return offset size in bytes.
      size_t * sizeof_sizeOUT: Pointer to location to return length size in bytes.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_sizes_f -
    -
    -SUBROUTINE h5pget_sizes_f(prp_id, sizeof_addr, sizeof_size, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER(SIZE_T), DIMENSION(:), INTENT(OUT) :: sizeof_addr
    -                                        ! Size of an object address in bytes
    -  INTEGER(SIZE_T), DIMENSION(:), INTENT(OUT) :: sizeof_size 
    -                                        ! Size of an object in bytes
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_sizes_f
    -	
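    A short C sketch (added for illustration; not part of the original entry) that reports the offset and length sizes. The identifier fcpl and the helper name report_sizes are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: report the address and length sizes stored in a file
     * creation property list. */
    static void report_sizes(hid_t fcpl)
    {
        size_t sizeof_addr = 0, sizeof_size = 0;

        if (H5Pget_sizes(fcpl, &sizeof_addr, &sizeof_size) >= 0)
            printf("offsets: %lu bytes, lengths: %lu bytes\n",
                   (unsigned long)sizeof_addr,
                   (unsigned long)sizeof_size);
    }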
    - - -
    - - - -
    -
    -
    Name: H5Pget_small_data_block_size -
    Signature: -
    herr_t H5Pget_small_data_block_size(hid_t fapl_id, - hsize_t *size - ) -
    Purpose: -
    Retrieves the current small data block size setting. -
    Description: -
    H5Pget_small_data_block_size retrieves the - current setting for the size of the small data block. -

    - If the returned value is zero (0), the small data - block mechanism has been disabled for the file. -

    Parameters: -
      - - - - - - -
      hid_t fapl_id    IN: File access property list identifier.
      hsize_t *sizeOUT: Maximum size, in bytes, of the small data block.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise a negative value. -
    Fortran90 Interface: h5pget_small_data_block_size_f -
    -
    -SUBROUTINE h5pget_small_data_block_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id  ! File access property list 
    -                                          ! identifier
    -  INTEGER(HSIZE_T), INTENT(OUT) :: size   ! Small raw data block size
    -  INTEGER, INTENT(OUT)       :: hdferr    ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_small_data_block_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_sym_k -
    Signature: -
    herr_t H5Pget_sym_k(hid_t plist, - unsigned * ik, - unsigned * lk - ) -
    Purpose: -
    Retrieves the size of the symbol table B-tree 1/2 rank - and the symbol table leaf node 1/2 size. -
    Description: -
    H5Pget_sym_k retrieves the size of the symbol table B-tree 1/2 rank and the symbol table leaf node 1/2 size. This function is only valid for file creation property lists. If a parameter is set to NULL, that parameter is not retrieved. See the description of H5Pset_sym_k for more information.
    Parameters: -
      - - - - - - - - - -
      hid_t plist    IN: Property list to query.
      unsigned * ikOUT: Pointer to location to return the symbol table's B-tree 1/2 rank.
      unsigned * lkOUT: Pointer to location to return the symbol table's leaf node 1/2 size.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_sym_k_f -
    -
    -SUBROUTINE h5pget_sym_k_f(prp_id, ik, lk, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(OUT) :: ik            ! Symbol table tree rank
    -  INTEGER, INTENT(OUT) :: lk            ! Symbol table node size
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_sym_k_f
    -	
    - - -
    - - - - - -
    -
    -
    Name: H5Pget_userblock -
    Signature: -
    herr_t H5Pget_userblock(hid_t plist, - hsize_t * size - ) -
    Purpose: -
    Retrieves the size of a user block. -
    Description: -
    H5Pget_userblock retrieves the size of a user block - in a file creation property list. -
    Parameters: -
      - - - - - - -
      hid_t plistIN: Identifier for property list to query.
      hsize_t * size    OUT: Pointer to location to return user-block size.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_userblock_f -
    -
    -SUBROUTINE h5pget_userblock_f(prp_id, block_size, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id   ! Property list identifier
    -  INTEGER(HSIZE_T), DIMENSION(:), INTENT(OUT) ::  block_size 
    -                                         ! Size of the user-block in bytes
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_userblock_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_version -
    Signature: -
    herr_t H5Pget_version(hid_t plist, - unsigned * super, - unsigned * freelist, - unsigned * stab, - unsigned * shhdr - ) -
    Purpose: -
    Retrieves the version information of various objects for - a file creation property list. -
    Description: -
    H5Pget_version retrieves the version information of various objects - for a file creation property list. Any pointer parameters which are - passed as NULL are not queried. -
    Parameters: -
      - - - - - - - - - - - - - - - -
      hid_t plistIN: Identifier of the file creation property list.
      unsigned * superOUT: Pointer to location to return super block version number.
      unsigned * freelist    OUT: Pointer to location to return global freelist version number.
      unsigned * stabOUT: Pointer to location to return symbol table version number.
      unsigned * shhdrOUT: Pointer to location to return shared object header version number.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pget_version_f -
    -
    -SUBROUTINE h5pget_version_f(prp_id, boot, freelist, & 
    -                            stab, shhdr, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id         ! Property list identifier
    -  INTEGER, DIMENSION(:), INTENT(OUT) :: boot   ! Array to put boot block 
    -                                               ! version number
    -  INTEGER, DIMENSION(:), INTENT(OUT) :: freelist  
    -                                               ! Array to put global
    -                                               ! freelist version number
    -  INTEGER, DIMENSION(:), INTENT(OUT) :: stab   ! Array to put symbol table
    -                                               ! version number
    -  INTEGER, DIMENSION(:), INTENT(OUT) :: shhdr  ! Array to put shared object 
    -                                               ! header version number
    -  INTEGER, INTENT(OUT) :: hdferr               ! Error code
    -                                               ! 0 on success and -1 on failure
    -END SUBROUTINE h5pget_version_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pget_vlen_mem_manager -
    Signature: -
    herr_t H5Pget_vlen_mem_manager(hid_t plist, - H5MM_allocate_t *alloc, - void **alloc_info, - H5MM_free_t *free, - void **free_info - ) -
    Purpose: -
    Gets the memory manager for variable-length datatype allocation in - H5Dread and H5Dvlen_reclaim. -
    Description: -
    H5Pget_vlen_mem_manager is the companion function to - H5Pset_vlen_mem_manager, returning the parameters - set by that function. -
    Parameters: -
      - - - - - - - - - - - - - - - -
      hid_t plistIN: Identifier for the dataset transfer property list.
      H5MM_allocate_t alloc    OUT: User's allocate routine, or   NULL - for system   malloc.
      void *alloc_infoOUT: Extra parameter for user's allocation routine. -
      - Contents are ignored if preceding parameter is   - NULL.
      H5MM_free_t freeOUT: User's free routine, or   NULL for - system free.
      void *free_infoOUT: Extra parameter for user's free routine. -
      - Contents are ignored if preceding parameter is   - NULL.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pinsert - -
    Signature: -
    herr_t H5Pinsert( - hid_t plid, - const char *name, - size_t size, - void *value, - H5P_prp_set_func_t set, - H5P_prp_get_func_t get, - H5P_prp_delete_func_t delete, - H5P_prp_copy_func_t copy, - H5P_prp_compare_func_t compare, - H5P_prp_close_func_t close - ) - -
    Purpose: -
    Registers a temporary property with a property list. - -
    Description: -
    H5Pinsert creates a new property in a property list. The property will exist only in this property list and in copies made from it.

    - The initial property value must be provided in - value and the property value will be set accordingly. - -

    - The name of the property must not already exist in this list, - or this routine will fail. - -

    - The set and get callback routines may - be set to NULL if they are not needed. - -

    - Zero-sized properties are allowed and do not store any data in the - property list. The default value of a zero-size property may be set - to NULL. They may be used to indicate the presence or absence of a - particular piece of information. -

    - - The set routine is called before a new value is copied - into the property. - The H5P_prp_set_func_t callback function is defined - as follows: -
      typedef herr_t (*H5P_prp_set_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *new_value); -
    - The parameters to the callback function are defined as follows: -
      - - - - - - - - - - - - -
      hid_t prop_idIN: The identifier of the property list being modified
      const char *nameIN: The name of the property being modified
      size_t sizeIN: The size of the property in bytes
      void **new_valueIN: Pointer to new value pointer for the property being - modified
    - The set routine may modify the value pointer to be set - and those changes will be used when setting the property's value. - If the set routine returns a negative value, the new - property value is not copied into the property and the set routine - returns an error value. - The set routine will be called for the initial value. -

    - Note: - The set callback function may be useful - to range check the value being set for the property - or may perform some transformation or translation of the - value set. The get callback would then - reverse the transformation or translation. - A single get or set callback - could handle multiple properties by - performing different actions based on the - property name or other properties in the property list. - -

    - The get routine is called when a value is retrieved - from a property value. - The H5P_prp_get_func_t callback function is defined - as follows: -

      typedef herr_t (*H5P_prp_get_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *value); -
    - where the parameters to the callback function are: -
      - - - - - - - - - - - - -
      hid_t prop_idIN: The identifier of the property list being queried
      const char *nameIN: The name of the property being queried
      size_t sizeIN: The size of the property in bytes
      void *valueIN: The value of the property being returned
    - The get routine may modify the value to be returned from - the query and those changes will be preserved. - If the get routine returns a negative value, the query - routine returns an error value. -

    - -

    - The delete routine is called when a property is being - deleted from a property list. - The H5P_prp_delete_func_t callback function is defined - as follows: -

      typedef herr_t (*H5P_prp_delete_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *value); -
    - where the parameters to the callback function are: -
      - - - - - - - - - - - - -
      hid_t prop_idIN: The identifier of the property list the property is - being deleted from
      const char * nameIN: The name of the property in the list
      size_t sizeIN: The size of the property in bytes
      void * valueIN: The value for the property being deleted
    - The delete routine may modify the value passed in, - but the value is not used by the library when the delete - routine returns. If the delete routine returns a - negative value, the property list delete routine returns an - error value but the property is still deleted. -

    - -

    - The copy routine is called when a new property list - with this property is being created through a copy operation. - The H5P_prp_copy_func_t callback function is defined - as follows: -

      typedef herr_t (*H5P_prp_copy_func_t)( - const char *name, - size_t size, - void *value); -
    - where the parameters to the callback function are: -
      - - - - - - - - - -
      const char *nameIN: The name of the property being copied
      size_t sizeIN: The size of the property in bytes
      void * valueIN/OUT: The value for the property being copied
    - The copy routine may modify the value to be set and - those changes will be stored as the new value of the property. - If the copy routine returns a negative value, the - new property value is not copied into the property and the - copy routine returns an error value. -

    - -

    - The compare routine is called when a property list with - this property is compared to another property list with the same property. - The H5P_prp_compare_func_t callback function is defined - as follows: -

      typedef int (*H5P_prp_compare_func_t)( - const void *value1, - const void *value2, - size_t size); -
    - The parameters to the callback function are defined as follows: -
      - - - - - - - - - -
      const void *value1IN: The value of the first property to compare
      const void *value2IN: The value of the second property to compare
      size_t sizeIN: The size of the property in bytes
    - The compare routine may not modify the values. - The compare routine should return a positive value if - value1 is greater than value2, a negative value - if value2 is greater than value1 and zero if - value1 and value2 are equal. -

    - -

    The close routine is called when a property list - with this property is being closed. - The H5P_prp_close_func_t callback function is defined - as follows: -

      typedef herr_t (*H5P_prp_close_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *value); -
    - The parameters to the callback function are defined as follows: -
      - - - - - - - - - - - - -
      hid_t prop_idIN: The ID of the property list being closed
      const char *nameIN: The name of the property in the list
      size_t sizeIN: The size of the property in bytes
      void *valueIN: The value for the property being closed
    - The close routine may modify the value passed in, but the value is not used by the library when the close routine returns. If the close routine returns a negative value, the property list close routine returns an error value but the property list is still closed.

    - Note: - There is no create callback routine for temporary property - list objects; the initial value is assumed to have any necessary setup - already performed on it. - - -

    Parameters: -
      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      hid_t plidIN: Property list identifier to create temporary property - within
      const char *nameIN: Name of property to create
      size_t sizeIN: Size of property in bytes
      void *valueIN: Initial value for the property
      H5P_prp_set_func_t setIN: Callback routine called before a new value is copied into - the property's value
      H5P_prp_get_func_t getIN: Callback routine called when a property value is retrieved - from the property
      H5P_prp_delete_func_t delete    IN: Callback routine called when a property is deleted from - a property list
      H5P_prp_copy_func_t copyIN: Callback routine called when a property is copied from - an existing property list
      H5P_prp_compare_func_t compareIN: Callback routine called when a property is compared with - another property list
      H5P_prp_close_func_t closeIN: Callback routine called when a property list is being closed - and the property value will be disposed of
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value - -
    Fortran90 Interface: h5pinsert_f -
    -
    -SUBROUTINE h5pinsert_f(plist, name, size, value, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist   ! Property list class identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of property to insert
    -  INTEGER(SIZE_T), INTENT(IN) :: size   ! Size of the property value	
    -  TYPE,   INTENT(IN) :: value           ! Property value
    -                                        ! Supported types are:
    -                                        !    INTEGER
    -                                        !    REAL
    -                                        !    DOUBLE PRECISION
    -                                        !    CHARACTER(LEN=*)
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pinsert_f
    -	
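    A C sketch (added for illustration; not part of the original entry) that attaches a temporary integer property to one property list and reads it back with the generic H5Pget call. The property name "example.verbosity", the value, and the helper name are invented; all callbacks are passed as NULL, which the text above allows, and the argument order follows the signature documented in this entry.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: insert a temporary property and read it back. */
    static void add_temporary_property(hid_t plist)
    {
        int level    = 3;   /* initial property value */
        int readback = 0;

        if (H5Pinsert(plist, "example.verbosity", sizeof(int), &level,
                      NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
            fprintf(stderr, "H5Pinsert failed\n");
            return;
        }
        if (H5Pget(plist, "example.verbosity", &readback) >= 0)
            printf("verbosity = %d\n", readback);
    }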
    - - -
    - - - -
    -
    -
    Name: H5Pisa_class - -
    Signature: -
    htri_t H5Pisa_class( - hid_t plist, - hid_t pclass - ) - -
    Purpose: -
    Determines whether a property list is a member of a class. - -
    Description: -
    H5Pisa_class checks to determine whether a property list - is a member of the specified class. - -
    Parameters: -
      - - - - - - -
      hid_t plistIN: Identifier of the property list
      hid_t pclass    IN: Identifier of the property class
    - -
    Returns: -
    Success: TRUE (positive) if equal; FALSE (zero) if unequal -
    Failure: a negative value - -
    Fortran90 Interface: h5pisa_class_f -
    -
    -SUBROUTINE h5pisa_class_f(plist, pclass, flag, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist     ! Property list identifier 
    -  INTEGER(HID_T), INTENT(IN) :: pclass    ! Class identifier
    -  LOGICAL, INTENT(OUT) :: flag            ! Logical flag
    -                                          !    .TRUE. if a member
    -                                          !    .FALSE. otherwise
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5pisa_class_f
    -	
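    A short C sketch (added for illustration; not part of the original entry) that verifies class membership before using class-specific calls. The identifier plist and the helper name check_is_dcpl are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: confirm that a property list is a dataset creation
     * property list. */
    static void check_is_dcpl(hid_t plist)
    {
        htri_t is_dcpl = H5Pisa_class(plist, H5P_DATASET_CREATE);

        if (is_dcpl > 0)
            printf("plist is a dataset creation property list\n");
        else if (is_dcpl == 0)
            printf("plist belongs to some other class\n");
        else
            fprintf(stderr, "H5Pisa_class failed\n");
    }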
    - - -
    - - - -
    -
    -
    Name: H5Piterate - -
    Purpose: -
    Iterates over properties in a property class or list. - -
    Signature: -
    int H5Piterate( - hid_t id, - int * idx, - H5P_iterate_t iter_func, - void * iter_data - ) - -
    Description: -

    H5Piterate iterates over the properties in the - property object specified in id, which may be either a - property list or a property class, performing a specified - operation on each property in turn. - -

    - For each property in the object, the property name and the additional information described below are passed to the iter_func operator, a function of type H5P_iterate_t.

    - The iteration begins with the idx-th property in - the object; the next element to be processed by the operator - is returned in idx. - If idx is NULL, the iterator starts at the first - property; since no stopping point is returned in this case, - the iterator cannot be restarted if one of the calls to its - operator returns non-zero. -

    - - The prototype for the H5P_iterate_t operator is - as follows: -
      -
      typedef herr_t (*H5P_iterate_t)( hid_t id, const char *name, void *iter_data )
    - The operation receives the property list or class identifier for - the object being iterated over, id, - the name of the current property within the object, name, - and the pointer to the operator data passed in to - H5Piterate, iter_data. -

    - - The valid return values from an operator are as follows: -
      - - - - - - - - - -
      ZeroCauses the iterator to continue, returning zero when all - properties have been processed
      PositiveCauses the iterator to immediately return that positive - value, indicating short-circuit success. The iterator can - be restarted at the index of the next property
      NegativeCauses the iterator to immediately return that value, - indicating failure. The iterator can be restarted at the - index of the next property
    - -

    - H5Piterate assumes that the properties in the object - identified by id remain unchanged through the iteration. - If the membership changes during the iteration, the function's behavior - is undefined. - -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t idIN: Identifier of property object to iterate over
      int * idxIN/OUT: Index of the property to begin with
      H5P_iterate_t iter_func    IN: Function pointer to function to be called with each - property iterated over
      void * iter_dataIN/OUT: Pointer to iteration data from user
    - -
    Returns: -
    Success: the return value of the last call to - iter_func if it was non-zero; - zero if all properties have been processed -
    Failure: a negative value -
- -
Fortran90 Interface: -
None. - - - - - - - -
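A C sketch (added for illustration; not part of the original entry) showing an H5P_iterate_t operator that counts and prints property names. The identifier plist and the helper names count_prop and count_properties are assumed.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: operator called once per property; returning zero
     * tells the iterator to continue. */
    static herr_t count_prop(hid_t id, const char *name, void *iter_data)
    {
        (void)id;                       /* object identifier unused here */
        (*(int *)iter_data)++;
        printf("property: %s\n", name);
        return 0;
    }

    /* Sketch: drive the iteration over all properties in plist. */
    static void count_properties(hid_t plist)
    {
        int count = 0;
        int idx   = 0;                  /* start at the first property */

        if (H5Piterate(plist, &idx, count_prop, &count) >= 0)
            printf("%d properties visited\n", count);
    }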
-
-
Name: H5Pmodify_filter -
Signature: -
herr_t H5Pmodify_filter(hid_t plist, - H5Z_filter_t filter, - unsigned int flags, - size_t cd_nelmts, - const unsigned int cd_values[] - ) -
Purpose: -
Modifies a filter in the filter pipeline. -
Description: -
H5Pmodify_filter modifies the specified - filter in the filter pipeline. - plist must be a dataset creation property list - and the modified filter will be in a permanent filter pipeline. -

- The filter, flags, cd_nelmts, and cd_values[] parameters are used in the same manner and accept the same values as described in the discussion of H5Pset_filter.

Note: -
This function currently supports only the permanent filter - pipeline; plist_id must be a dataset creation - property list. -
Parameters: -
    - - - - - - - - - - - - - - - -
    hid_t plist_idIN: Property list identifier.
    H5Z_filter_t filterIN: Filter to be modified.
    unsigned int flagsIN: Bit vector specifying certain general properties - of the filter.
    size_t cd_nelmtsIN: Number of elements in cd_values.
    const unsigned int cd_values[]    IN: Auxiliary data for the filter.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. - -
Fortran90 Interface: h5pmodify_filter_f -
-
-SUBROUTINE h5pmodify_filter_f(prp_id, filter, flags, cd_nelmts, & 
-                              cd_values, hdferr) 
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: prp_id      ! Property list identifier
-  INTEGER, INTENT(IN)        :: filter      ! Filter to be modified
-  INTEGER, INTENT(IN)        :: flags       ! Bit vector specifying certain 
-                                            ! general properties of the filter
-  INTEGER(SIZE_T), INTENT(IN) :: cd_nelmts  ! Number of elements in cd_values
-  INTEGER, DIMENSION(*), INTENT(IN) :: cd_values  
-                                            ! Auxiliary data for the filter
-  INTEGER, INTENT(OUT)       :: hdferr      ! Error code
-                                            ! 0 on success and -1 on failure
-END SUBROUTINE h5pmodify_filter_f
-	
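A short C sketch (added for illustration; not part of the original entry) that changes the client data of a filter already in the pipeline, here raising the deflate compression level. The identifier dcpl, the helper name raise_deflate_level, and the assumption that H5Z_FILTER_DEFLATE is already present are illustrative.

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: modify the deflate filter's client data in a dataset
     * creation property list. */
    static void raise_deflate_level(hid_t dcpl)
    {
        unsigned int cd_values[1] = { 9 };   /* new compression level */

        if (H5Pmodify_filter(dcpl, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL,
                             1, cd_values) < 0)
            fprintf(stderr, "H5Pmodify_filter failed\n");
    }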
- - -
- - - -
-
-
Name: H5Pregister - -
Signature: -
herr_t H5Pregister( - hid_t class, - const char * name, - size_t size, - void * default, - H5P_prp_create_func_t create, - H5P_prp_set_func_t set, - H5P_prp_get_func_t get, - H5P_prp_delete_func_t delete, - H5P_prp_copy_func_t copy, - H5P_prp_compare_func_t compare, - H5P_prp_close_func_t close - ) - -
Purpose: -
Registers a permanent property with a property list class. - -
Description: -
H5Pregister registers a new property with a property list class. The property will exist in all property list objects of the class created after this routine finishes. The name of the property must not already exist, or this routine will fail. The default property value must be provided, and all new property lists created with this property will have the property value set to the default value. Any of the callback routines may be set to NULL if they are not needed.

- Zero-sized properties are allowed and do not store any data in the - property list. These may be used as flags to indicate the presence - or absence of a particular piece of information. The default pointer - for a zero-sized property may be set to NULL. - The property create and close callbacks - are called for zero-sized properties, but the set and - get callbacks are never called. -

- -

- The create routine is called when a new property list - with this property is being created. - The H5P_prp_create_func_t callback function is defined - as follows: -

    typedef herr_t (*H5P_prp_create_func_t)( - const char *name, - size_t size, - void *initial_value); -
- The parameters to this callback function are defined as follows: -
    - - - - - - - - - -
    const char *nameIN: The name of the property being modified
    size_t sizeIN: The size of the property in bytes
    void *initial_valueIN/OUT: The default value for the property being created, - which will be passed to H5Pregister
- The create routine may modify the value to be set and - those changes will be stored as the initial value of the property. - If the create routine returns a negative value, - the new property value is not copied into the property and the - create routine returns an error value. -

- -

- The set routine is called before a new value is copied - into the property. - The H5P_prp_set_func_t callback function is defined - as follows: -

    typedef herr_t (*H5P_prp_set_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *new_value); -
- The parameters to this callback function are defined as follows: -
    - - - - - - - - - - - - -
    hid_t prop_idIN: The identifier of the property list being modified
    const char *nameIN: The name of the property being modified
    size_t sizeIN: The size of the property in bytes
    void **new_valueIN/OUT: Pointer to new value pointer for the property being - modified
- The set routine may modify the value pointer to be set - and those changes will be used when setting the property's value. - If the set routine returns a negative value, the new - property value is not copied into the property and the - set routine returns an error value. - The set routine will not be called for the initial - value, only the create routine will be called. -

- Note: - The set callback function may be useful - to range check the value being set for the property - or may perform some transformation or translation of the - value set. The get callback would then - reverse the - - transformation or translation. - A single get or set callback - could handle multiple properties by - performing different actions based on the - property name or other properties in the property list. - -

- The get routine is called when a value is retrieved - from a property value. - The H5P_prp_get_func_t callback function is defined - as follows: -

    typedef herr_t (*H5P_prp_get_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *value); -
- The parameters to the callback function are defined as follows: -
    - - - - - - - - - - - - -
    hid_t prop_idIN: The identifier of the property list being queried
    const char * nameIN: The name of the property being queried
    size_t sizeIN: The size of the property in bytes
    void * valueIN/OUT: The value of the property being returned
- The get routine may modify the value to be returned from - the query and those changes will be returned to the calling routine. - If the get routine returns a negative value, the query - routine returns an error value. -

- -

- The delete routine is called when a property is being - deleted from a property list. - The H5P_prp_delete_func_t callback function is defined - as follows: -

    typedef herr_t (*H5P_prp_delete_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *value); -
- The parameters to the callback function are defined as follows: -
    - - - - - - - - - - - - -
    hid_t prop_idIN: The identifier of the property list the property is being - deleted from
    const char * nameIN: The name of the property in the list
    size_t sizeIN: The size of the property in bytes
    void * valueIN: The value for the property being deleted
- The delete routine may modify the value passed in, - but the value is not used by the library when the delete - routine returns. If the delete routine returns - a negative value, the property list delete routine returns - an error value but the property is still deleted. -

- -

- The copy routine is called when a new property list with - this property is being created through a copy operation. - The H5P_prp_copy_func_t callback function is defined - as follows: -

    typedef herr_t (*H5P_prp_copy_func_t)( - const char *name, - size_t size, - void *value); -
- The parameters to the callback function are defined as follows: -
    - - - - - - - - - -
    const char *nameIN: The name of the property being copied
    size_t sizeIN: The size of the property in bytes
    void *valueIN/OUT: The value for the property being copied
- The copy routine may modify the value to be set and - those changes will be stored as the new value of the property. - If the copy routine returns a negative value, - the new property value is not copied into the property and - the copy routine returns an error value. -

- -

- The compare routine is called when a property list with - this property is compared to another property list with the same property. - The H5P_prp_compare_func_t callback function is defined - as follows: -

    typedef int (*H5P_prp_compare_func_t)( - const void *value1, - const void *value2, - size_t size); -
- The parameters to the callback function are defined as follows: -
    - - - - - - - - - -
    const void *value1IN: The value of the first property to compare
    const void *value2IN: The value of the second property to compare
    size_t sizeIN: The size of the property in bytes
- The compare routine may not modify the values. - The compare routine should return a positive value if - value1 is greater than value2, a negative value - if value2 is greater than value1 and zero if - value1 and value2 are equal. -

- -

- The close routine is called when a property list with - this property is being closed. - The H5P_prp_close_func_t callback function is defined - as follows: -

    typedef herr_t (*H5P_prp_close_func_t)( - hid_t prop_id, - const char *name, - size_t size, - void *value); -
- The parameters to the callback function are defined as follows: -
    - - - - - - - - - - - - -
    hid_t prop_idIN: The identifier of the property list being - closed
    const char *nameIN: The name of the property in the list
    size_t sizeIN: The size of the property in bytes
    void *valueIN: The value for the property being closed
- The close routine may modify the value passed in, - but the value is not used by the library when the - close routine returns. - If the close routine returns a negative value, - the property list close routine returns an error value but - the property list is still closed. -

- -
Parameters: -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    hid_t classIN: Property list class to register permanent property - within
    const char * nameIN: Name of property to register
    size_t sizeIN: Size of property in bytes
    void * defaultIN: Default value for property in newly created property - lists
    H5P_prp_create_func_t create    IN: Callback routine called when a property list is being - created and the property value will be initialized
    H5P_prp_set_func_t setIN: Callback routine called before a new value is copied - into the property's value
    H5P_prp_get_func_t getIN: Callback routine called when a property value is - retrieved from the property
    H5P_prp_delete_func_t deleteIN: Callback routine called when a property is deleted from - a property list
    H5P_prp_copy_func_t copyIN: Callback routine called when a property is copied from - a property list
    H5P_prp_compare_func_t compareIN: Callback routine called when a property is compared with - another property list
    H5P_prp_close_func_t closeIN: Callback routine called when a property list is being - closed and the property value will be disposed of
- -
Returns: -
Success: a non-negative value -
Failure: a negative value - -
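Example: -
A minimal sketch, assuming cls is a property list class identifier previously obtained from H5Pcreate_class and using a hypothetical property name; it registers a permanent integer-valued property whose callbacks are all NULL: -
-    int def_val = 10;                 /* default value stored for the property */
-
-    /* every new list of class cls will contain "sample_prop" set to 10 */
-    if (H5Pregister(cls, "sample_prop", sizeof(int), &def_val,
-                    NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
-        /* handle error */
-    }
- -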
Fortran90 Interface: h5pregister_f -
-
-SUBROUTINE h5pregister_f(class, name, size, value, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: class   ! Property list class identifier 
-  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of property to register
-  INTEGER(SIZE_T), INTENT(IN) :: size   ! Size of the property value	
-  TYPE,   INTENT(IN) :: value           ! Property value
-                                        ! Supported types are:
-                                        !    INTEGER
-                                        !    REAL
-                                        !    DOUBLE PRECISION
-                                        !    CHARACTER(LEN=*)
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code
-                                        ! 0 on success and -1 on failure
-END SUBROUTINE h5pregister_f
-	
- - -
- - - -
-
-
Name: H5Premove - -
Signature: -
herr_t H5Premove( - hid_t plid, - const char *name - ) - -
Purpose: -
Removes a property from a property list. - -
Description: -
H5Premove removes a property from a property list. - -

- Both properties which were in existence when the property list - was created (i.e. properties registered with H5Pregister) - and properties added to the list after it was created (i.e. added - with H5Pinsert) may be removed from a property list. - Properties do not need to be removed from a property list before the - list itself is closed; they will be released automatically when - H5Pclose is called. - -

- If a close callback exists for the removed property, - it will be called before the property is released. - -

Parameters: -
    - - - - - - -
    hid_t plidIN: Identifier of the property list to modify
    const char *name    IN: Name of property to remove
- -
Returns: -
Success: a non-negative value -
Failure: a negative value - -
Fortran90 Interface: h5premove_f -
-
-SUBROUTINE h5premove_f(plid, name, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: plid   ! Property list identifier
-  CHARACTER(LEN=*), INTENT(IN) :: name ! Name of property to remove
-  INTEGER, INTENT(OUT) :: hdferr       ! Error code
-                                       ! 0 on success and -1 on failure
-END SUBROUTINE h5premove_f
-	
- - -
- - - - -
-
-
Name: H5Pset - -
Signature: -
herr_t H5Pset( - hid_t plid, - const char *name, - void *value - ) - -
Purpose: -
Sets a property list value. - -
Description: -
H5Pset sets a new value for a property in a - property list. If there is a set callback - routine registered for this property, the value will be - passed to that routine and any changes to the value - will be used when setting the property value. - The information pointed to by the value pointer - (possibly modified by the set callback) is copied into - the property list value and may be changed by the application making - the H5Pset call without affecting the property value. - -

- The property name must exist or this routine will fail. - -

- If the set callback routine returns an error, the - property value will not be modified. - -

- This routine may not be called for zero-sized properties - and will return an error in that case. - -

Parameters: -
    - - - - - - -
    hid_t plidIN: Property list identifier to modify
    const char *name    IN: Name of property to modify
    void *valueIN: Pointer to value to set the property to
- -
Returns: -
Success: a non-negative value -
Failure: a negative value - -
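Example: -
A brief sketch, assuming plist is a property list that already contains an integer property named "sample_prop" (for instance, registered as in the H5Pregister example above); the call copies a new value into that property: -
-    int new_val = 25;
-
-    /* copy new_val into the property; plist keeps its own copy */
-    if (H5Pset(plist, "sample_prop", &new_val) < 0) {
-        /* handle error */
-    }
- -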
Fortran90 Interface: h5pset_f -
-
-SUBROUTINE h5pset_f(plid, name, value, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: plid    ! Property list identifier 
-  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of property to set
-  TYPE,   INTENT(IN) :: value           ! Property value
-                                        ! Supported types are:
-                                        !    INTEGER
-                                        !    REAL
-                                        !    DOUBLE PRECISION
-                                        !    CHARACTER(LEN=*)
-  INTEGER, INTENT(OUT) :: hdferr        ! Error code
-                                        ! 0 on success and -1 on failure
-END SUBROUTINE h5pset_f
-	
- - -
- - - -
-
-
Name: H5Pset_alignment -
Signature: -
herr_t H5Pset_alignment(hid_t plist, - hsize_t threshold, - hsize_t alignment - ) -
Purpose: -
Sets alignment properties of a file access property list. -
Description: -
H5Pset_alignment sets the alignment properties - of a file access property list - so that any file object greater than or equal in size to - threshold bytes will be aligned on an address - which is a multiple of alignment. The addresses - are relative to the end of the user block; the alignment is - calculated by subtracting the user block size from the - absolute file address and then adjusting the address to be a - multiple of alignment. -

- Default values for threshold and - alignment are one, implying - no alignment. Generally the default values will result in - the best performance for single-process access to the file. - For MPI-IO and other parallel systems, choose an alignment - which is a multiple of the disk block size. -

Parameters: -
    - - - - - - - - - -
    hid_t plistIN: Identifier for a file access property list.
    hsize_t threshold    IN: Threshold value. - Note that setting the threshold value to 0 (zero) has - the effect of a special case, forcing everything - to be aligned.
    hsize_t alignmentIN: Alignment value.
-
Returns: -
Returns a non-negative value if successful; - otherwise returns a negative value. -
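Example: -
A sketch using illustrative values (not recommendations): objects of 1 MB or more are aligned on 64 KB boundaries in a newly created file access property list: -
-    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
-
-    /* objects >= 1 MB start at file addresses that are multiples of 64 KB */
-    H5Pset_alignment(fapl, (hsize_t)1048576, (hsize_t)65536);
- -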
Fortran90 Interface: h5pset_alignment_f -
-
-SUBROUTINE h5pset_alignment_f(prp_id, threshold,  alignment, hdferr)
-  IMPLICIT NONE
-  INTEGER(HID_T), INTENT(IN) :: prp_id       ! Property list identifier
-  INTEGER(HSIZE_T), INTENT(IN) :: threshold  ! Threshold value
-  INTEGER(HSIZE_T), INTENT(IN) :: alignment  ! Alignment value
-  INTEGER, INTENT(OUT) :: hdferr             ! Error code
-                                             ! 0 on success and -1 on failure
-END SUBROUTINE h5pset_alignment_f
-	
- - -
- - - -
-
-
Name: H5Pset_alloc_time -
Signature: -
herr_t H5Pset_alloc_time(hid_t plist_id, - H5D_alloc_time_t alloc_time - ) -
Purpose: -
Sets the timing for storage space allocation. -
Description: -
H5Pset_alloc_time sets up the timing for the allocation of - storage space for a dataset's raw data. - This property is set in the dataset creation property list - plist_id. -

- Timing is specified in alloc_time with one of the - following values: - -
     H5D_ALLOC_TIME_DEFAULT
        Allocate dataset storage space at the default time.
        (Defaults differ by storage method.)
     H5D_ALLOC_TIME_EARLY
        Allocate all space when the dataset is created.
        (Default for compact datasets.)
     H5D_ALLOC_TIME_INCR
        Allocate space incrementally, as data is written to the dataset.
        (Default for chunked storage datasets.)
          • Chunked datasets: Storage space allocation for each chunk is deferred until data is written to the chunk.
          • Contiguous datasets: Incremental storage space allocation for contiguous data is treated as late allocation.
          • Compact datasets: Incremental allocation is not allowed with compact datasets; H5Pset_alloc_time will return an error.
     H5D_ALLOC_TIME_LATE
        Allocate all space when data is first written to the dataset.
        (Default for contiguous datasets.)
    -

    Note: -
    H5Pset_alloc_time is designed to work in concert - with the dataset fill value and fill value write time properties, - set with the functions - H5Pset_fill_value and H5Pset_fill_time. -

    -

    - See H5Dcreate for - further cross-references. -

    Parameters: -
      - - - - - - -
      hid_t plist_idIN: Dataset creation property list identifier.
      H5D_alloc_time_t alloc_time    IN: When to allocate dataset storage space.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
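    Example: -
    A sketch that requests early allocation in a dataset creation property list, so that all storage is allocated when the dataset is created: -
    -    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -
    -    /* allocate all dataset storage space at H5Dcreate time */
    -    H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
    - -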
    Fortran90 Interface: h5pset_alloc_time_f -
    -
    -SUBROUTINE h5pset_alloc_time_f(plist_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id  ! Dataset creation property
    -                                          ! list identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: flag    ! Allocation time flag
    -                                          ! Possible values are:
    -                                          !    H5D_ALLOC_TIME_ERROR_F
    -                                          !    H5D_ALLOC_TIME_DEFAULT_F
    -                                          !    H5D_ALLOC_TIME_EARLY_F
    -                                          !    H5D_ALLOC_TIME_LATE_F
    -                                          !    H5D_ALLOC_TIME_INCR_F
    - -
    -  INTEGER, INTENT(OUT)       :: hdferr    ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_alloc_time_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_btree_ratios -
    Signature: -
    herr_t H5Pset_btree_ratios(hid_t plist, - double left, - double middle, - double right - ) -
    Purpose: -
    Sets B-tree split ratios for a dataset transfer property list. -
    Description: -
    H5Pset_btree_ratios sets the B-tree split ratios - for a dataset transfer property list. The split ratios determine - what percent of children go in the first node when a node splits. -

    - The ratio left is used when the splitting node is - the left-most node at its level in the tree; - the ratio right is used when the splitting node is - the right-most node at its level; - and the ratio middle is used for all other cases. -

    - A node which is the only node at its level in the tree uses - the ratio right when it splits. -

    - All ratios are real numbers between 0 and 1, inclusive. -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t plistIN: The dataset transfer property list identifier.
      double leftIN: The B-tree split ratio for left-most nodes.
      double rightIN: The B-tree split ratio for right-most nodes and lone nodes.
      double middle    IN: The B-tree split ratio for all other nodes.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_btree_ratios_f -
    -
    -SUBROUTINE h5pset_btree_ratios_f(prp_id, left, middle, right, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id   
    -                                  ! Property list identifier
    -  REAL, INTENT(IN) :: left        ! The B-tree split ratio for left-most nodes
    -  REAL, INTENT(IN) :: middle      ! The B-tree split ratio for all other nodes
    -  REAL, INTENT(IN) :: right       ! The B-tree split ratio for right-most
    -                                  ! nodes and lone nodes.
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -                                  ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_btree_ratios_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_buffer -
    Signature: -
    herr_t H5Pset_buffer(hid_t plist, - hsize_t size, - void *tconv, - void *bkg - ) -
    Purpose: -
    Sets type conversion and background buffers. -
    Description: -
    Given a dataset transfer property list, H5Pset_buffer - sets the maximum size - for the type conversion buffer and background buffer and - optionally supplies pointers to application-allocated buffers. - If the buffer size is smaller than the entire amount of data - being transferred between the application and the file, and a type - conversion buffer or background buffer is required, then - strip mining will be used. -

    - Note that there are minimum size requirements for the buffer. - Strip mining can only break the data up along the first dimension, - so the buffer must be large enough to accommodate a complete slice - that encompasses all of the remaining dimensions. - For example, when strip mining a 100x200x300 hyperslab - of a simple data space, the buffer must be large enough to - hold 1x200x300 data elements. - When strip mining a 100x200x300x150 hyperslab of a simple data space, - the buffer must be large enough to hold 1x200x300x150 data elements. -

    - If tconv and/or bkg are null pointers, - then buffers will be allocated and freed during the data transfer. -

    - The default value for the maximum buffer size is 1 MB. -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t plistIN: Identifier for the dataset transfer property list.
      hsize_t size    IN: Size, in bytes, of the type conversion and background buffers.
      void *tconvIN: Pointer to application-allocated type conversion buffer.
      void *bkgIN: Pointer to application-allocated background buffer.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
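    Example: -
    A sketch with an illustrative buffer size: the type conversion and background buffer limit is raised to 4 MB and the library is left to allocate the buffers itself: -
    -    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    -
    -    /* NULL buffer pointers: the library allocates and frees the buffers */
    -    H5Pset_buffer(dxpl, (hsize_t)(4 * 1024 * 1024), NULL, NULL);
    - -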
    Fortran90 Interface: h5pset_buffer_f -
    -
    -SUBROUTINE h5pset_buffer_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)   :: plist_id ! Dataset transfer property 
    -                                           ! list identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: size     ! Conversion buffer size
    -  INTEGER, INTENT(OUT)         :: hdferr   ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_buffer_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_cache -
    Signature: -
    herr_t H5Pset_cache(hid_t plist_id, - int mdc_nelmts, - int rdcc_nelmts, - size_t rdcc_nbytes, - double rdcc_w0 - ) -
    Purpose: -
    Sets the meta data cache and raw data chunk cache parameters. -
    Description: -
    H5Pset_cache sets - the number of elements (objects) in the meta data cache and - the number of elements, the total number of bytes, and - the preemption policy value in the raw data chunk cache. -

    - The plist_id is a file access property list. - The number of elements (objects) in the meta data cache - and the raw data chunk cache are mdc_nelmts and - rdcc_nelmts, respectively. - The total size of the raw data chunk cache and the preemption policy - are rdcc_nbytes and rdcc_w0. -

    - Any (or all) of the H5Pget_cache pointer arguments - may be null pointers. -

    - The rdcc_w0 value should be between 0 and 1 inclusive and - indicates how much chunks that have been fully read are - favored for preemption. A value of zero means fully read - chunks are treated no differently than other chunks (the - preemption is strictly LRU) while a value of one means fully - read chunks are always preempted before other chunks. -

    Parameters: -
      - - - - - - - - - - - - - - - -
      hid_t plist_idIN: Identifier of the file access property list.
      int mdc_nelmtsIN: Number of elements (objects) in the meta data cache.
      int rdcc_nelmtsIN: Number of elements (objects) in the raw data chunk cache.
      size_t rdcc_nbytes    IN: Total size of the raw data chunk cache, in bytes.
      double rdcc_w0IN: Preemption policy.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
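    Example: -
    A sketch with illustrative values only: the raw data chunk cache is enlarged to 4 MB with 2000 chunk slots, the meta data cache holds 10000 objects, and the preemption policy is 0.75: -
    -    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    -
    -    H5Pset_cache(fapl, 10000, 2000, (size_t)(4 * 1024 * 1024), 0.75);
    - -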
    Fortran90 Interface: h5pset_cache_f -
    -
    -SUBROUTINE h5pset_cache_f(prp_id, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id        ! Property list identifier
    -  INTEGER, INTENT(IN) :: mdc_nelmts           ! Number of elements (objects)
    -                                              ! in the meta data cache
    -  INTEGER(SIZE_T), INTENT(IN) :: rdcc_nelmts  ! Number of elements (objects)
    -                                              ! in the raw data chunk cache
    -  INTEGER(SIZE_T), INTENT(IN) :: rdcc_nbytes  ! Total size of the raw data
    -                                              ! chunk cache, in bytes
    -  REAL, INTENT(IN) :: rdcc_w0                 ! Preemption policy
    -  INTEGER, INTENT(OUT) :: hdferr              ! Error code
    -                                              ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_cache_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_chunk -
    Signature: -
    herr_t H5Pset_chunk(hid_t plist, - int ndims, - const hsize_t * dim - ) -
    Purpose: -
    Sets the size of the chunks used to store a chunked layout dataset. -
    Description: -
    H5Pset_chunk sets the size of the chunks used to - store a chunked layout dataset. This function is only valid - for dataset creation property lists. -

    - The ndims parameter currently must be the same size - as the rank of the dataset. -

    - The values of the dim - array define the size of the chunks to store the dataset's raw data. - The unit of measure for dim values is - dataset elements. -

    - As a side-effect of this function, the layout of the dataset is - changed to H5D_CHUNKED, if it is not already so set. - (See H5Pset_layout.) -

    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Identifier for property list to query.
      int ndimsIN: The number of dimensions of each chunk.
      const hsize_t * dim    IN: An array defining the size, in dataset elements, - of each chunk.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
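    Example: -
    A sketch that stores a two-dimensional dataset in 64x64-element chunks (the chunk size is illustrative; it should be tuned to the access pattern): -
    -    hsize_t chunk_dims[2] = {64, 64};   /* chunk size in dataset elements */
    -    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -
    -    /* as a side effect, the layout becomes H5D_CHUNKED */
    -    H5Pset_chunk(dcpl, 2, chunk_dims);
    - -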
    Fortran90 Interface: h5pset_chunk_f -
    -
    -SUBROUTINE h5pset_chunk_f(prp_id, ndims, dims, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier 
    -  INTEGER, INTENT(IN) :: ndims          ! Number of chunk dimensions
    -  INTEGER(HSIZE_T), DIMENSION(ndims), INTENT(IN) :: dims    
    -                                        ! Array containing sizes of
    -                                        ! chunk dimensions
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_chunk_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_data_transform -
    Signature: -
    herr_t H5Pset_data_transform - (hid_t plist_id, - const char *expression) -
    Purpose: -
    Sets a data transform expression. -
    Description: -
    H5Pset_data_transform sets the data transform to - be used for reading and writing data. - This function operates on the dataset transfer property list - plist_id. -

    - The expression parameter is a string containing an algebraic - expression, such as (5/9.0)*(x-32) - or x*(x-5). - When a dataset is read or written with this property list, - the transform expression is applied with the x - being replaced by the values in the dataset. - When reading data, the values in the file are not changed - and the transformed data is returned to the user. -

    - Data transforms can only be applied to integer or floating-point - datasets. Order of operations is obeyed and the only supported - operations are +, -, *, and /. Parentheses can be nested arbitrarily - and can be used to change precedence. -

    - When writing data back to the dataset, the transformed data is - written to the file and there is no way to recover the original - values to which the transform was applied. -

    Parameters: -
      - - - - - - -
      hid_t plist_idIN: Identifier of the property list or class
      const char *expression  IN: Pointer to the null-terminated data transform expression -
    -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value -
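    Example: -
    A sketch that attaches the Fahrenheit-to-Celsius expression from the description above to a dataset transfer property list: -
    -    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    -
    -    /* x is replaced by each dataset value during the transfer */
    -    H5Pset_data_transform(dxpl, "(5/9.0)*(x-32)");
    - -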
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pset_deflate -
    Signature: -
    herr_t H5Pset_deflate(hid_t plist, - int level - ) -
    Purpose: -
    Sets compression method and compression level. -
    Description: -
    H5Pset_deflate sets the compression method for a - dataset creation property list to H5D_COMPRESS_DEFLATE - and the compression level to level, which should - be a value from zero to nine, inclusive. - Lower compression levels are faster but result in less compression. - This is the same algorithm as used by the GNU gzip program. -
    Parameters: -
      - - - - - - -
      hid_t plist    IN: Identifier for the dataset creation property list.
      int levelIN: Compression level.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
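    Example: -
    A sketch that enables level-6 gzip compression on a dataset creation property list; deflate operates on chunks, so a (purely illustrative) chunk size is set first: -
    -    hsize_t chunk_dims[2] = {64, 64};
    -    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -
    -    H5Pset_chunk(dcpl, 2, chunk_dims);   /* compression is applied per chunk */
    -    H5Pset_deflate(dcpl, 6);             /* compression level 6 */
    - -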
    Fortran90 Interface: h5pset_deflate_f -
    -
    -SUBROUTINE h5pset_deflate_f(prp_id, level, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier 
    -  INTEGER, INTENT(IN)        :: level  ! Compression level 
    -  INTEGER, INTENT(OUT)       :: hdferr ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_deflate_f
    -	
    - - -
    - - - - - -
    -
    -
    Name: H5Pset_dxpl_mpio -
    Signature: -
    herr_t H5Pset_dxpl_mpio( - hid_t dxpl_id, - H5FD_mpio_xfer_t xfer_mode - ) -
    Purpose: -
    Sets data transfer mode. -
    Description: -
    H5Pset_dxpl_mpio sets the data transfer property list - dxpl_id to use transfer mode xfer_mode. - The property list can then be used to control the I/O transfer mode - during data I/O operations. -

    - Valid transfer modes are as follows: -

    -
    -
    H5FD_MPIO_INDEPENDENT -
    Use independent I/O access (default). -
    H5FD_MPIO_COLLECTIVE -
    Use collective I/O access. -
    -

    -
    Parameters: -
      - - - - - - -
      hid_t dxpl_idIN: Data transfer property list identifier.
      H5FD_mpio_xfer_t xfer_mode    IN: Transfer mode.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
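    Example: -
    A sketch (parallel HDF5 only) that requests collective I/O for a raw data transfer: -
    -    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    -
    -    /* use collective MPI-IO calls for reads and writes made with this list */
    -    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    - -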
    Fortran90 Interface: h5pset_dxpl_mpio_f -
    -
    -SUBROUTINE h5pset_dxpl_mpio_f(prp_id, data_xfer_mode, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(IN) :: data_xfer_mode ! Data transfer mode 
    -                                        ! Possible values are:
    -                                        !    H5FD_MPIO_INDEPENDENT_F
    -                                        !    H5FD_MPIO_COLLECTIVE_F
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_dxpl_mpio_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_dxpl_multi -
    Signature: -
    herr_t H5Pset_dxpl_multi( - hid_t dxpl_id, - const hid_t *memb_dxpl - ) -
    Purpose: -
    Sets the data transfer property list for the multi-file driver. -
    Description: -
    H5Pset_dxpl_multi sets the data transfer property list - dxpl_id to use the multi-file driver for each - memory usage type memb_dxpl[]. -

    - H5Pset_dxpl_multi can only be used after - the member map has been set with H5Pset_fapl_multi. -

    Parameters: -
      - - - - - - -
      hid_t dxpl_id,IN: Data transfer property list identifier.
      const hid_t *memb_dxpl    IN: Array of data access property lists.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pset_edc_check -
    Signature: -
    herr_t H5Pset_edc_check(hid_t plist, - H5Z_EDC_t check) -
    Purpose: -
    Sets whether to enable error-detection when reading a dataset. -
    Description: -
    H5Pset_edc_check sets the dataset transfer property - list plist to enable or disable error detection - when reading data. -

    - Whether error detection is enabled or disabled is specified - in the check parameter. - Valid values are as follows: - - -
           - H5Z_ENABLE_EDC   (default) -
    - H5Z_DISABLE_EDC -
    -

    - The error detection algorithm used is the algorithm previously - specified in the corresponding dataset creation property list.   -

    - This function does not affect the use of error detection when - writing data.   -

    Note: -
    The initial error detection implementation, Fletcher32 checksum, - supports error detection for chunked datasets only. -
    Parameters: -
      - - - - - - -
      hid_t plistIN: Dataset transfer property list identifier.
      H5Z_EDC_t check    IN: Specifies whether error checking is enabled or disabled - for dataset read operations.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_edc_check_f -
    -
    -SUBROUTINE h5pset_edc_check_f(prp_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id   ! Dataset transfer property 
    -                                         ! list identifier 
    -  INTEGER, INTENT(IN)        :: flag     ! EDC flag; possible values
    -                                         !    H5Z_DISABLE_EDC_F 
    -                                         !    H5Z_ENABLE_EDC_F 
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    - 
    -END SUBROUTINE h5pset_edc_check_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_external -
    Signature: -
    herr_t H5Pset_external(hid_t plist, - const char *name, - off_t offset, - hsize_t size - ) -
    Purpose: -
    Adds an external file to the list of external files. -
    Description: -
    The first call to H5Pset_external sets the - external storage property in the property list, - thus designating that the dataset will be stored in - one or more non-HDF5 file(s) external to the HDF5 file. - This call also adds the file name as the - first file in the list of external files. - Subsequent calls to the function add the named file as - the next file in the list. -

    - If a dataset is split across multiple files, then the files - should be defined in order. The total size of the dataset is - the sum of the size arguments for all the external files. - If the total size is larger than the size of a dataset then the - dataset can be extended (provided the data space also allows - the extending). -

    - The size argument specifies the number of bytes reserved - for data in the external file. - If size is set to H5F_UNLIMITED, the - external file can be of unlimited size and no more files can be added - to the external files list. -

    - All of the external files for a given dataset must be - specified with H5Pset_external - before H5Dcreate is called to create - the dataset. - If one of these files does not exist on the system when - H5Dwrite is called to write data to it, - the library will create the file. -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t plistIN: Identifier of a dataset creation property list.
      const char *name    IN: Name of an external file.
      off_t offsetIN: Offset, in bytes, from the beginning of the file - to the location in the file where the data starts.
      hsize_t sizeIN: Number of bytes reserved in the file for the data.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
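    Example: -
    A sketch with hypothetical file names and sizes: the raw data of a dataset is split across two external files of 1000 bytes each; both calls must precede the H5Dcreate call that creates the dataset: -
    -    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -
    -    /* dataset bytes 0-999 go to ext1.dat, bytes 1000-1999 to ext2.dat */
    -    H5Pset_external(dcpl, "ext1.dat", (off_t)0, (hsize_t)1000);
    -    H5Pset_external(dcpl, "ext2.dat", (off_t)0, (hsize_t)1000);
    - -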
    Fortran90 Interface: h5pset_external_f -
    -
    -SUBROUTINE h5pset_external_f(prp_id, name, offset, bytes, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of an external file
    -  INTEGER, INTENT(IN) :: offset         ! Offset, in bytes, from the 
    -                                        ! beginning of the file to the 
    -                                        ! location in the file where 
    -                                        ! the data starts
    -  INTEGER(HSIZE_T), INTENT(IN) :: bytes ! Number of bytes reserved in 
    -                                        ! the file for the data
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_external_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_family_offset -
    Signature: -
    herr_t H5Pset_family_offset ( - hid_t fapl_id, - hsize_t offset - ) -
    Purpose: -
    Sets offset property for low-level access to a file in a family of files. -
    Description: -
    H5Pset_family_offset sets the offset property in the - file access property list fapl_id so that the user application - can retrieve a file handle for low-level access to a particular member - of a family of files. The file handle is retrieved with a separate call - to H5Fget_vfd_handle - (or, in special circumstances, to H5FDget_vfd_handle; - see Virtual File Layer and List of VFL Functions - in HDF5 Technical Notes). -

    - The value of offset is an offset in bytes from the - beginning of the HDF5 file, identifying a user-determined location - within the HDF5 file. The file handle the user application is seeking - is for the specific member-file in the associated family of files - to which this offset is mapped. -

    - Use of this function is only appropriate for an HDF5 file written as a - family of files with the FAMILY file driver. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      hsize_t offset    IN: Offset in bytes within the HDF5 file.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_family_offset_f -
    -
    -SUBROUTINE h5pset_family_offset_f(prp_id, offset, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)   :: prp_id   ! Property list identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: offset   ! Offset in bytes
    -  INTEGER, INTENT(OUT)         :: hdferr   ! Error code
    -                                           ! 0 on success and -1 on failure
    - 
    -END SUBROUTINE h5pset_family_offset_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_core -
    Signature: -
    herr_t H5Pset_fapl_core( - hid_t fapl_id, - size_t increment, - hbool_t backing_store - ) -
    Purpose: -
    Modifies the file access property list to use the - H5FD_CORE driver. -
    Description: -
    H5Pset_fapl_core modifies the file access property list - to use the H5FD_CORE driver. -

    - The H5FD_CORE driver enables an application to work - with a file in memory, speeding reads and writes as no disk access - is made. File contents are stored only in memory until the file - is closed. The backing_store parameter determines - whether file contents are ever written to disk. -

    - increment specifies the increment by which allocated - memory is to be increased each time more memory is required. -

    - If backing_store is set to 1 - (TRUE), the file contents are flushed to a file - with the same name as this core file when the file is closed - or access to the file is terminated in memory. -

    Note: -
    There is currently no means for reading a file from disk then - using the H5FD_CORE driver to manipulate the file. -
    Parameters: -
      - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      size_t incrementIN: Size, in bytes, of memory increments.
      hbool_t backing_store    IN: Boolean flag indicating whether to write the file - contents to disk when the file is closed.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
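    Example: -
    A sketch (file name hypothetical) that works on a file entirely in memory, growing the allocation in 1 MB increments and discarding the contents when the file is closed (backing_store set to 0): -
    -    hid_t fapl, file;
    -
    -    fapl = H5Pcreate(H5P_FILE_ACCESS);
    -    H5Pset_fapl_core(fapl, (size_t)1048576, 0);   /* 1 MB increments, no backing store */
    -    file = H5Fcreate("scratch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    - -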
    Fortran90 Interface: h5pset_fapl_core_f -
    -
    -SUBROUTINE h5pset_fapl_core_f(prp_id, increment, backing_store, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)  :: prp_id    ! Property list identifier
    -  INTEGER(SIZE_T), INTENT(IN) :: increment ! File block size in bytes
    -  LOGICAL, INTENT(IN) :: backing_store     ! Flag to indicate that entire 
    -                                           ! file contents are flushed to 
    -                                           ! a file with the same name as 
    -                                           ! this core file
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fapl_core_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_family -
    Signature: -
    herr_t H5Pset_fapl_family ( - hid_t fapl_id, - hsize_t memb_size, - hid_t memb_fapl_id - ) -
    Purpose: -
    Sets the file access property list to use the family driver. -
    Description: -
    H5Pset_fapl_family sets the file access property list - identifier, fapl_id, to use the family driver. -

    - memb_size is the size in bytes of each file member. This size - will be saved in the file when the property list fapl_id is used - to create a new file. If fapl_id is used to open an existing - file, memb_size has to be equal to the original size saved in - the file. A failure with an error message indicating the correct member - size will be returned if memb_size does not match the size saved. - If a user does not know the original size, H5F_FAMILY_DEFAULT - can be passed in. The library will retrieve the correct size saved in the file. -

    - memb_fapl_id is the identifier of the - file access property list to be used for each family member. -

    Parameters: -
      - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      hsize_t memb_sizeIN: Size in bytes of each file member.
      hid_t memb_fapl_id    IN: Identifier of file access property list for each - family member.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_fapl_family_f -
    -
    -SUBROUTINE h5pset_fapl_family_f(prp_id, memb_size, memb_plist, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)   :: prp_id    ! Property list identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: memb_size ! Logical size, in bytes,
    -                                            ! of each family member
    -  INTEGER(HID_T), INTENT(IN) :: memb_plist  ! Identifier of the file 
    -                                            ! access property list to be
    -                                            ! used for each family member
    -  INTEGER, INTENT(OUT) :: hdferr            ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fapl_family_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_gass -
    Signature: -
    herr_t H5Pset_fapl_gass( - hid_t fapl_id, - GASS_Info info - ) -
    Purpose: -
    Stores user-supplied GASS information. -
    Description: -
    H5Pset_fapl_gass stores user-supplied GASS information, - the GASS_Info struct data as passed in info, - to the file access property list fapl_id. - fapl_id can then be used to create and/or open the file. -

    - The GASS_Info object, info, is used for - file open operations when using GASS in the Globus environment. -

    - Any modification to info after this function call - returns may have undetermined effect to the access property list. - Users must call H5Pset_fapl_gass again to setup - the property list. -

    Note: -
    H5Pset_fapl_gass is an experimental function. - It is designed for use only when accessing files via the - GASS facility of the Globus environment. - For further information, see - http://www.globus.org/. -
    Parameters: -
      - - - - - - -
      hid_t fapl_id,IN: File access property list identifier.
      GASS_Info info    IN: Pointer to the GASS information structure.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_log -
    Signature: -
    herr_t H5Pset_fapl_log( - hid_t fapl_id, - const char *logfile, - unsigned int flags, - size_t buf_size - ) -
    Purpose: -
    Sets up the use of the logging driver. -
    Description: -
    H5Pset_fapl_log modifies the - file access property list to use the logging driver - H5FD_LOG. -

    - logfile is the name of the file in which the - logging entries are to be recorded. -

    - The actions to be logged are specified in the parameter flags - using the pre-defined constants described in the following table. - Multiple flags can be set through the use of a logical OR contained - in parentheses. For example, logging read and write locations would - be specified as (H5FD_LOG_LOC_READ|H5FD_LOG_LOC_WRITE). - -

    Flag                       Description
    H5FD_LOG_LOC_READ          Track the location and length of every
    H5FD_LOG_LOC_WRITE         read, write, or seek operation.
    H5FD_LOG_LOC_SEEK
    H5FD_LOG_LOC_IO            Track all I/O locations and lengths.
                               The logical equivalent of the following:
                               (H5FD_LOG_LOC_READ | H5FD_LOG_LOC_WRITE | H5FD_LOG_LOC_SEEK)
    H5FD_LOG_FILE_READ         Track the number of times each byte is read
    H5FD_LOG_FILE_WRITE        or written.
    H5FD_LOG_FILE_IO           Track the number of times each byte is read and written.
                               The logical equivalent of the following:
                               (H5FD_LOG_FILE_READ | H5FD_LOG_FILE_WRITE)
    H5FD_LOG_FLAVOR            Track the type, or flavor, of information stored at each byte.
    H5FD_LOG_NUM_READ          Track the total number of read, write, or seek
    H5FD_LOG_NUM_WRITE         operations that occur.
    H5FD_LOG_NUM_SEEK
    H5FD_LOG_NUM_IO            Track the total number of all types of I/O operations.
                               The logical equivalent of the following:
                               (H5FD_LOG_NUM_READ | H5FD_LOG_NUM_WRITE | H5FD_LOG_NUM_SEEK)
    H5FD_LOG_TIME_OPEN         Track the time spent in open, read, write, seek,
    H5FD_LOG_TIME_READ         or close operations.
    H5FD_LOG_TIME_WRITE        Not implemented in this release: open and read.
    H5FD_LOG_TIME_SEEK         Partially implemented: write and seek.
    H5FD_LOG_TIME_CLOSE        Fully implemented: close.
    H5FD_LOG_TIME_IO           Track the time spent in each of the above operations.
                               The logical equivalent of the following:
                               (H5FD_LOG_TIME_OPEN | H5FD_LOG_TIME_READ | H5FD_LOG_TIME_WRITE |
                                H5FD_LOG_TIME_SEEK | H5FD_LOG_TIME_CLOSE)
    H5FD_LOG_ALLOC             Track the allocation of space in the file.
    H5FD_LOG_ALL               Track everything.
                               The logical equivalent of the following:
                               (H5FD_LOG_ALLOC | H5FD_LOG_TIME_IO | H5FD_LOG_NUM_IO | H5FD_LOG_FLAVOR |
                                H5FD_LOG_FILE_IO | H5FD_LOG_LOC_IO)

    - The logging driver can track the number of times - each byte in the file is read from or written to - (using H5FD_LOG_FILE_READ and H5FD_LOG_FILE_WRITE) - and what kind of data is at that location - (e.g., meta data, raw data; using H5FD_LOG_FLAVOR). - This information is tracked in a buffer of size buf_size, - which must be at least the size in bytes of the file to be logged. - -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      char *logfileIN: Name of the log file.
      unsigned int flags    IN: Flags specifying the types of logging activity.
      size_t buf_sizeIN: The size of the logging buffer.
    -
    Returns: -
    Returns non-negative if successful. - Otherwise returns negative. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_mpio -
    Signature: -
    herr_t H5Pset_fapl_mpio( - hid_t fapl_id, - MPI_Comm comm, - MPI_Info info - ) -
    Purpose: -
    Stores MPI IO communicator information to the file access property list. -
    Description: -
    H5Pset_fapl_mpio stores the user-supplied - MPI IO parameters comm, for communicator, and - info, for information, in - the file access property list fapl_id. - That property list can then be used to create and/or open the file. -

    - H5Pset_fapl_mpio is available only in the - parallel HDF5 library and is not a collective function. -

    - comm is the MPI communicator to be used for - file open as defined in MPI_FILE_OPEN of MPI-2. - This function does not create a duplicated communicator. - Modifications to comm after this function call - returns may have an undetermined effect on the access property list. - Users should not modify the communicator while it is defined - in a property list. -

    - info is the MPI info object to be used for - file open as defined in MPI_FILE_OPEN of MPI-2. - This function does not create a duplicated info object. - Any modification to the info object after this function call - returns may have an undetermined effect on the access property list. - Users should not modify the info while it is defined - in a property list. -

    Parameters: -
      - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      MPI_Comm comm    IN: MPI-2 communicator.
      MPI_Info infoIN: MPI-2 info object.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
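    Example: -
    A sketch (parallel HDF5 only; the file name is hypothetical) in which all processes in MPI_COMM_WORLD collectively create one file: -
    -    hid_t fapl, file;
    -
    -    fapl = H5Pcreate(H5P_FILE_ACCESS);
    -    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    -    /* with this fapl, H5Fcreate is a collective operation */
    -    file = H5Fcreate("parallel.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    - -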
    Fortran90 Interface: h5pset_fapl_mpio_f -
    -
    -SUBROUTINE h5pset_fapl_mpio_f(prp_id, comm, info, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(IN) :: comm           ! MPI communicator to be used for 
    -                                        ! file open as defined in 
    -                                        ! MPI_FILE_OPEN of MPI-2
    -  INTEGER, INTENT(IN) :: info           ! MPI info object to be used for 
    -                                        ! file open as defined in 
    -                                        ! MPI_FILE_OPEN of MPI-2
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fapl_mpio_f
    -	
    - - -
    - - - - -
    -
    -
    Name: H5Pset_fapl_mpiposix -
    Signature: -
    herr_t H5Pset_fapl_mpiposix( - hid_t fapl_id, - MPI_Comm comm - ) -
    Purpose: -
    Stores MPI IO communicator information to a file access property list. -
    Description: -
    H5Pset_fapl_mpiposix stores the user-supplied - MPI IO parameter comm, for communicator, - in the file access property list fapl_id. - That property list can then be used to create and/or open the file. -

    - H5Pset_fapl_mpiposix is available only in the - parallel HDF5 library and is not a collective function. -

    - comm is the MPI communicator to be used for - file open as defined in MPI_FILE_OPEN of MPI-2. - This function does not create a duplicated communicator. - Modifications to comm after this function call - returns may have an undetermined effect on the access property list. - Users should not modify the communicator while it is defined - in a property list. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      MPI_Comm comm    IN: MPI-2 communicator.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pset_fapl_mpiposix_f -
    -
    -SUBROUTINE h5pset_fapl_mpiposix_f(prp_id, comm, use_gpfs, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(IN) :: comm           ! MPI communicator to be used 
    -                                        ! for file open as defined in 
    -                                        ! MPI_FILE_OPEN of MPI-2
    -  LOGICAL, INTENT(IN) :: use_gpfs       ! Flag to use GPFS hints
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5pset_fapl_mpiposix_f
    -	
    - - -
    - - - - -
    -
    -
    Name: H5Pset_fapl_multi -
    Signature: -
    herr_t H5Pset_fapl_multi( - hid_t fapl_id, - const H5FD_mem_t *memb_map, - const hid_t *memb_fapl, - const char * const *memb_name, - const haddr_t *memb_addr, - hbool_t relax - ) -
    Purpose: -
    Sets up use of the multi-file driver. -
    Description: -
    H5Pset_fapl_multi sets the file access property list - fapl_id to use the multi-file driver. -

    - The multi-file driver enables different types of HDF5 data and - metadata to be written to separate files. These files are viewed - by the HDF5 library and the application as a single virtual HDF5 file - with a single HDF5 file address space. - The types of data that can be broken out into separate files include - raw data, the superblock, B-tree data, global heap data, - local heap data, and object headers. - At the programmer's discretion, two or more types of data can be - written to the same file while other types of data are written to - separate files. -

    - The array memb_map maps memory usage types to other - memory usage types and is the mechanism that allows the caller - to specify how many files are created. - The array contains H5FD_MEM_NTYPES entries, - which are either the value H5FD_MEM_DEFAULT - or a memory usage type. - The number of unique values determines the number of files - that are opened. -

    - The array memb_fapl contains a property list - for each memory usage type that will be associated with a file. -

    - The array memb_name should be a name generator - (a printf-style format with a %s which will be replaced with the - name passed to H5FDopen, usually from - H5Fcreate or H5Fopen). -

    - The array memb_addr specifies the offsets within the - virtual address space, from 0 (zero) to - HADDR_MAX, at which each type of data storage begins. -

    - If relax is set to TRUE (or 1), - then opening an existing file for read-only access will not fail - if some file members are missing. - This allows a file to be accessed in a limited sense if just the - meta data is available. -

    - Default values for each of the optional arguments are as follows: -

    -
    -
    memb_map
        The default member map contains the value H5FD_MEM_DEFAULT for each element.
    memb_fapl
        The default value is H5P_DEFAULT for each element.
    memb_name
        The default string is %s-X.h5 where X is one of the following letters:
            s for H5FD_MEM_SUPER
            b for H5FD_MEM_BTREE
            r for H5FD_MEM_DRAW
            g for H5FD_MEM_GHEAP
            l for H5FD_MEM_LHEAP
            o for H5FD_MEM_OHDR
    memb_addr
        The default value is HADDR_UNDEF for each element.
    -
    -
    Parameters: -
      - - - - - - - - - - - - - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      const H5FD_mem_t *memb_map    IN: Maps memory usage types to other memory usage types.
      const hid_t *memb_faplIN: Property list for each memory usage type.
      const char * const *memb_nameIN: Name generator for names of member files.
      const haddr_t *memb_addrIN: The offsets within the virtual address space, - from 0 (zero) to HADDR_MAX, - at which each type of data storage begins.
      hbool_t relaxIN: Allows read-only access to incomplete file sets - when TRUE.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Example: -
    The following code sample sets up a multi-file access property list - that partitions data into meta and raw files, each being - one-half of the address: -
    -                  H5FD_mem_t mt, memb_map[H5FD_MEM_NTYPES];
    -                  hid_t memb_fapl[H5FD_MEM_NTYPES];
    -                  const char *memb_name[H5FD_MEM_NTYPES];
    -                  haddr_t memb_addr[H5FD_MEM_NTYPES];
    - 
    -                  // The mapping...
    -                  for (mt=0; mt<H5FD_MEM_NTYPES; mt++) {
    -                     memb_map[mt] = H5FD_MEM_SUPER;
    -                  }
    -                  memb_map[H5FD_MEM_DRAW] = H5FD_MEM_DRAW;
    - 
    -                  // Member information
    -                  memb_fapl[H5FD_MEM_SUPER] = H5P_DEFAULT;
    -                  memb_name[H5FD_MEM_SUPER] = "%s.meta";
    -                  memb_addr[H5FD_MEM_SUPER] = 0;
    - 
    -                  memb_fapl[H5FD_MEM_DRAW] = H5P_DEFAULT;
    -                  memb_name[H5FD_MEM_DRAW] = "%s.raw";
    -                  memb_addr[H5FD_MEM_DRAW] = HADDR_MAX/2;
    - 
    -                  hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    -                  H5Pset_fapl_multi(fapl, memb_map, memb_fapl,
    -                                  memb_name, memb_addr, TRUE);
    -        
    - -
    Fortran90 Interface: h5pset_fapl_multi_f -
    -
    -SUBROUTINE h5pset_fapl_multi_f(prp_id, memb_map, memb_fapl, memb_name, &
    -                               memb_addr, relax, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T),INTENT(IN)  :: prp_id     ! Property list identifier
    -
    -  INTEGER,DIMENSION(0:H5FD_MEM_NTYPES_F-1),INTENT(IN)          :: memb_map
    -  INTEGER(HID_T),DIMENSION(0:H5FD_MEM_NTYPES_F-1),INTENT(IN)   :: memb_fapl
    -  CHARACTER(LEN=*),DIMENSION(0:H5FD_MEM_NTYPES_F-1),INTENT(IN) :: memb_name
    -  REAL, DIMENSION(0:H5FD_MEM_NTYPES_F-1), INTENT(IN)           :: memb_addr
    -              ! Numbers in the interval [0,1) (e.g. 0.0 0.1 0.5 0.2 0.3 0.4)
    -              ! real address in the file will be calculated as X*HADDR_MAX 
    -
    -  LOGICAL, INTENT(IN)  :: relax
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fapl_multi_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_sec2 -
    Signature: -
    herr_t H5Pset_fapl_sec2( - hid_t fapl_id - ) -
    Purpose: -
    Sets the sec2 driver. -
    Description: -
    H5Pset_fapl_sec2 modifies the file access property list - to use the H5FD_SEC2 driver. -
    Parameters: -
      - - - -
      hid_t fapl_id    IN: File access property list identifier.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
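    Example: -
    A brief usage sketch; the file name is illustrative and error checking is omitted: -
    -hid_t fapl, fid;
    -
    -fapl = H5Pcreate(H5P_FILE_ACCESS);
    -H5Pset_fapl_sec2(fapl);                 /* use the sec2 (POSIX I/O) driver */
    -fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    -H5Fclose(fid);
    -H5Pclose(fapl);
    - -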
    Fortran90 Interface: h5pset_fapl_sec2_f -
    -
    -SUBROUTINE h5pset_fapl_sec2_f(prp_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)    :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(OUT)          :: hdferr  ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fapl_sec2_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_split -
    Signature: -
    herr_t H5Pset_fapl_split( - hid_t fapl_id, - const char *meta_ext, - hid_t meta_plist_id, - const char *raw_ext, - hid_t raw_plist_id - ) -
    Purpose: -
    Emulates the old split file driver. -
    Description: -
    H5Pset_fapl_split is a compatibility function that - enables the multi-file driver to emulate the split driver from - HDF5 Releases 1.0 and 1.2. - The split file driver stored metadata and raw data in separate files - but provided no mechanism for separating types of metadata. -

    - fapl_id is a file access property list identifier. -

    - meta_ext is the filename extension for the metadata file. - The extension is appended to the name passed to H5FDopen, - usually from H5Fcreate or H5Fopen, - to form the name of the metadata file. - If the string %s is used in the extension, it works like the - name generator as in H5Pset_fapl_multi. -

    - meta_plist_id is the file access property list identifier - for the metadata file. -

    - raw_ext is the filename extension for the raw data file. - The extension is appended to the name passed to H5FDopen, - usually from H5Fcreate or H5Fopen, - to form the name of the rawdata file. - If the string %s is used in the extension, it works like the - name generator as in H5Pset_fapl_multi. -

    - raw_plist_id is the file access property list identifier - for the raw data file. -

    - If a user wishes to check whether this driver is in use, - the user must call H5Pget_driver and compare the - returned value to the constant H5FD_MULTI. - A positive match confirms that the multi driver is in use; - HDF5 provides no mechanism to determine whether it was called - as the special case invoked by H5Pset_fapl_split. -

    Parameters: -
      - - - - - - - - - - - - - - - -
      hid_t fapl_id,IN: File access property list identifier.
      const char *meta_ext,    IN: Metadata filename extension.
      hid_t meta_plist_id,IN: File access property list identifier for the metadata file.
      const char *raw_ext,IN: Raw data filename extension.
      hid_t raw_plist_idIN: File access property list identifier for the raw data file.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Example: -
    -
    -/* Example 1: Both metadata and rawdata files are in the same  */
    -/*    directory.   Use Station1-m.h5 and Station1-r.h5 as      */
    -/*    the metadata and rawdata files.                          */
    -hid_t fapl, fid;
    -fapl = H5Pcreate(H5P_FILE_ACCESS);
    -H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT);
    -fid=H5Fcreate("Station1",H5F_ACC_TRUNC,H5P_DEFAULT,fapl);
    -
    -/* Example 2: metadata and rawdata files are in different      */
    -/*    directories.  Use PointA-m.h5 and /pfs/PointA-r.h5 as    */
    -/*    the metadata and rawdata files.                          */
    -hid_t fapl, fid;
    -fapl = H5Pcreate(H5P_FILE_ACCESS);
    -H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "/pfs/%s-r.h5", H5P_DEFAULT);
    -fid=H5Fcreate("PointA",H5F_ACC_TRUNC,H5P_DEFAULT,fapl);
    - - -
    Fortran90 Interface: h5pset_fapl_split_f -
    -
    -SUBROUTINE h5pset_fapl_split_f(prp_id, meta_ext, meta_plist, raw_ext, &
    -                               raw_plist, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T),INTENT(IN)   :: prp_id     ! Property list identifier
    -  CHARACTER(LEN=*),INTENT(IN) :: meta_ext   ! Name of the extension for
    -                                            ! the metafile filename
    -  INTEGER(HID_T),INTENT(IN)   :: meta_plist ! Identifier of the meta file
    -                                            ! access property list
    -  CHARACTER(LEN=*),INTENT(IN) :: raw_ext    ! Name extension for the raw 
    -                                            ! file filename
    -  INTEGER(HID_T),INTENT(IN)   :: raw_plist  ! Identifier of the raw file
    -                                            ! access property list
    -  INTEGER, INTENT(OUT) :: hdferr            ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fapl_split_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_srb -
    Signature: -
    herr_t H5Pset_fapl_srb( - hid_t fapl_id, - SRB_Info info - ) -
    Purpose: -
    Saves SRB connection handler and sets SRB settings. -
    Description: -
    H5Pset_fapl_srb stores the SRB client-to-server - connection handler SRB_CONN after the connection - is established and other user-supplied SRB information. -

    - The user-supplied SRB information is contained in the - SRB_Info struct pointed to by info - and is stored in the file access property list fapl_id. - This information can then be used to create or open a file. -

    Note: -
    H5Pset_fapl_srb is an experimental function. - It is designed for use only when accessing files via the - Storage Resource Broker (SRB). For further information, see - http://www.npaci.edu/Research/DI/srb/. -
    Parameters: -
      - - - - - - -
      hid_t fapl_id    IN: File access property list identifier.
      SRB_Info infoIN: Pointer to the SRB information structure.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_stdio -
    Signature: -
    herr_t H5Pset_fapl_stdio( - hid_t fapl_id - ) -
    Purpose: -
    Sets the standard I/O driver. -
    Description: -
    H5Pset_fapl_stdio modifies the file access property list - to use the standard I/O driver, H5FD_STDIO. -
    Parameters: -
      - - - -
      hid_t fapl_id    IN: File access property list identifier.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pset_fapl_stdio_f -
    -
    -SUBROUTINE h5pset_fapl_stdio_f(prp_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)    :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(OUT)          :: hdferr  ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fapl_stdio_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fapl_stream -
    Signature: -
    herr_t H5Pset_fapl_stream( - hid_t fapl_id, - H5FD_stream_fapl_t *fapl - ) -
    Purpose: -
    Sets up the use of the streaming I/O driver. -
    Description: -
    H5Pset_fapl_stream sets up the use of the - streaming I/O driver. -

    - fapl_id is the identifier for the - file access property list currently in use. -

    - fapl is the file access property list. -

    - The H5FD_stream_fapl_t struct contains the following - elements: -

    - - - - - - - - - - - - - - - - - - - -
    size_tincrement
    H5FD_STREAM_SOCKET_TYPEsocket
    hbool_tdo_socket_io
    unsigned intbacklog
    H5FD_stream_broadcast_tbroadcast_fn
    void *broadcast_arg
    -
      -
    • increment specifies how much memory to allocate - each time additional memory is required. -
    • socket is an external socket descriptor; - if a valid socket argument is provided, that socket will be used. -
    • do_socket_io is a boolean value specifying whether - to perform I/O on socket. -
    • backlog is the argument for the - listen call. -
    • broadcast_fn is the broadcast callback function. -
    • broadcast_arg is the user argument to - the broadcast callback function. -
    -
    -

    - H5Pset_fapl_stream and H5Pget_fapl_stream - are not intended for use in a parallel environment. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      H5FD_stream_fapl_t *fapl    IN: The streaming I/O file access property list.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pset_fclose_degree -
    Signature: -
    herr_t H5Pset_fclose_degree(hid_t fapl_id, - H5F_close_degree_t fc_degree) -
    Purpose: -
    Sets the file close degree. -
    Description: -
    H5Pset_fclose_degree sets the file close degree property fc_degree - in the file access property list fapl_id.  -

    The value of fc_degree determines how aggressively H5Fclose - deals with objects within a file that remain open when H5Fclose - is called to close that file.  fc_degree can have any one of - four valid values: -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Degree nameH5Fclose behavior with no open object - in fileH5Fclose behavior with open object(s) - in file
    H5F_CLOSE_WEAKActual file is closed.Access to file identifier is terminated; actual file - close is delayed until all objects in file are closed
    H5F_CLOSE_SEMIActual file is closed.Function returns FAILURE
    H5F_CLOSE_STRONGActual file is closed.All open objects remaining in the file are closed then - file is closed
    H5F_CLOSE_DEFAULTThe VFL driver chooses the behavior.  Currently, - all VFL drivers set this value to H5F_CLOSE_WEAK, except - for the MPI-I/O driver, which sets it to H5F_CLOSE_SEMI. -
    -
    -
    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list identifier.
      H5F_close_degree_t fc_degree    IN: The file close degree property, i.e., the value of fc_degree.
    -
    Returns: -
    Returns a non-negative value if successful. Otherwise returns a negative - value. -
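    Example: -
    A brief sketch that requires all objects in a file to be closed before the file itself will close; the file name is illustrative and error checking is omitted: -
    -hid_t fapl, fid;
    -
    -fapl = H5Pcreate(H5P_FILE_ACCESS);
    -H5Pset_fclose_degree(fapl, H5F_CLOSE_SEMI);   /* H5Fclose fails while objects remain open */
    -fid = H5Fopen("example.h5", H5F_ACC_RDWR, fapl);
    -H5Fclose(fid);
    -H5Pclose(fapl);
    - -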
    Fortran90 Interface: h5pset_fclose_degree_f -
    -
    -SUBROUTINE h5pset_fclose_degree_f(fapl_id, degree, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: fapl_id  ! File access property list identifier
    -  INTEGER, INTENT(IN) :: degree          ! Info about file close behavior 
    -                                         ! Possible values:
    -                                         !    H5F_CLOSE_DEFAULT_F
    -                                         !    H5F_CLOSE_WEAK_F
    -                                         !    H5F_CLOSE_SEMI_F
    -                                         !    H5F_CLOSE_STRONG_F
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fclose_degree_f
    -	
    - - -
    - - -
    -
    -
    Name: H5Pset_fill_time -
    Signature: -
    herr_t H5Pset_fill_time(hid_t plist_id, - H5D_fill_time_t fill_time - ) -
    Purpose: -
    Sets the time when fill values are written to a dataset. -
    Description: -
    H5Pset_fill_time sets up the timing for writing fill values - to a dataset. - This property is set in the dataset creation property list plist_id. -

    - Timing is specified in fill_time with one of the following values: - -
         - H5D_FILL_TIME_IFSET   - - Write fill values to the dataset when storage space is allocated - only if there is a user-defined fill value, i.e., one set with - H5Pset_fill_value. -   (Default) -
      - H5D_FILL_TIME_ALLOC - - Write fill values to the dataset when storage space is allocated. -
      - H5D_FILL_TIME_NEVER - - Never write fill values to the dataset. -
    -

    Note: -
    H5Pset_fill_time is designed for coordination - with the dataset fill value and - dataset storage allocation time properties, set with the functions - H5Pset_fill_value and H5Pset_alloc_time. -

    - See H5Dcreate for - further cross-references. -

    Parameters: -
      - - - - - - -
      hid_t plist_idIN: Dataset creation property list identifier.
      H5D_fill_time_t fill_time    IN: When to write fill values to a dataset.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
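    Example: -
    A brief sketch that writes fill values as soon as storage is allocated, pairing this call with H5Pset_alloc_time as described above; error checking is omitted: -
    -hid_t dcpl;
    -
    -dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);   /* allocate storage at dataset creation */
    -H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC);     /* write fill values when storage is allocated */
    -/* dcpl can now be passed as the creation property list to H5Dcreate */
    - -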
    Fortran90 Interface: h5pset_fill_time_f -
    -
    -SUBROUTINE h5pset_fill_time_f(plist_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! Dataset creation property
    -                                         ! list identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: flag   ! Fill time flag
    -                                         ! Possible values are:
    -                                         !    H5D_FILL_TIME_ERROR_F
    -                                         !    H5D_FILL_TIME_ALLOC_F
    -                                         !    H5D_FILL_TIME_NEVER_F
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fill_time_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_fill_value -
    Signature: -
    herr_t H5Pset_fill_value(hid_t plist_id, - hid_t type_id, - const void *value - ) -
    Purpose: -
    Sets the fill value for a dataset. -
    Description: -
    H5Pset_fill_value sets the fill value for - a dataset in the dataset creation property list. -

    - value is interpreted as being of datatype - type_id. This datatype may differ from that of - the dataset, but the HDF5 library must be able to convert - value to the dataset datatype when the dataset - is created. -

    - The default fill value is 0 (zero), which is - interpreted according to the actual dataset datatype. -

    - Setting value to NULL indicates - that the fill value is to be undefined. -

    Notes: -
    Applications sometimes write data only to portions of - an allocated dataset. It is often useful in such cases - to fill the unused space with a known - fill value. - This function allows the user application to set that fill value; - the functions - H5Dfill and - H5Pset_fill_time, - respectively, provide the ability - to apply the fill value on demand or - to set up its automatic application. -

    - A fill value should be defined so that it is appropriate for - the application. While the HDF5 default fill value is - 0 (zero), it is often appropriate to use another value. - It might be useful, for example, to use a value that is - known to be impossible for the application to legitimately generate. -

    - H5Pset_fill_value is designed to work in - concert with H5Pset_alloc_time and - H5Pset_fill_time. - H5Pset_alloc_time and H5Pset_fill_time - govern the timing of dataset storage allocation and fill value - write operations and can be important in tuning application - performance. -

    - See H5Dcreate for - further cross-references. -

    Parameters: -
      - - - - - - - - - -
      hid_t plist_idIN: Dataset creation property list identifier.
      hid_t type_id,IN: Datatype of value.
      const void *value    IN: Pointer to buffer containing value to use as fill value.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
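    Example: -
    A brief sketch that sets -1 as the fill value for an integer dataset; the value is interpreted as H5T_NATIVE_INT and error checking is omitted: -
    -hid_t dcpl;
    -int   fill_val = -1;
    -
    -dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fill_val);
    -/* dcpl can now be passed as the creation property list to H5Dcreate */
    - -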
    Fortran90 Interface: h5pset_fill_value_f -
    -
    -SUBROUTINE h5pset_fill_value_f(prp_id, type_id, fillvalue, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier 
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier of fill
    -                                        ! value datatype (in memory)
    -  TYPE(VOID), INTENT(IN) :: fillvalue   ! Fillvalue
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fill_value_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_filter -
    Signature: -
    herr_t H5Pset_filter(hid_t plist, - H5Z_filter_t filter, - unsigned int flags, - size_t cd_nelmts, - const unsigned int cd_values[] - ) -
    Purpose: -
    Adds a filter to the filter pipeline. -
    Description: -
    H5Pset_filter adds the specified - filter and corresponding properties to the - end of an output filter pipeline. - If plist is a dataset creation property list, - the filter is added to the permanent filter pipeline; - if plist is a dataset transfer property list, - the filter is added to the transient filter pipeline. -

    - The array cd_values contains - cd_nelmts integers which are auxiliary data - for the filter. The integer values will be stored in the - dataset object header as part of the filter information. -

    - The flags argument is a bit vector with - the following fields specifying certain general properties - of the filter: -

    - - - - - -
    H5Z_FLAG_OPTIONAL  If this bit is set then the filter is - optional. If the filter fails (see below) during an - H5Dwrite operation then the filter is - just excluded from the pipeline for the chunk for which - it failed; the filter will not participate in the - pipeline during an H5Dread of the chunk. - This is commonly used for compression filters: if the - filter result would be larger than the input, then - the compression filter returns failure and the - uncompressed data is stored in the file. If this bit is - clear and a filter fails, then H5Dwrite - or H5Dread also fails. -

    - This flag should not be set for the Fletcher32 checksum - filter as it will bypass the checksum filter without - reporting checksum errors to an application.

    -
    -

    - The filter parameter specifies the filter to be set. - Valid filter identifiers are as follows: - -

    - - - - - -
    - H5Z_FILTER_DEFLATE - - Data compression filter, employing the gzip algorithm -
    - H5Z_FILTER_SHUFFLE - - Data shuffling filter -
    - H5Z_FILTER_FLETCHER32   - - Error detection filter, employing the Fletcher32 checksum algorithm -
    - H5Z_FILTER_SZIP - - Data compression filter, employing the SZIP algorithm -
    -
    -

    - Also see H5Pset_edc_check and - H5Pset_filter_callback. - -

    Notes: -
    This function currently supports only the permanent filter - pipeline; plist must be a dataset creation - property list. -

    - If multiple filters are set for a property list, they will be - applied to each chunk in the order in which they were set. -

    Parameters: -
      - - - - - - - - - - - - - - - -
      hid_t plistIN: Property list identifier.
      H5Z_filter_t filterIN: Filter identifier for the filter - to be added to the pipeline.
      unsigned int flagsIN: Bit vector specifying certain general properties - of the filter.
      size_t cd_nelmtsIN: Number of elements in cd_values.
      const unsigned int cd_values[]    IN: Auxiliary data for the filter.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
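    Example: -
    A brief sketch that adds the gzip (deflate) filter at compression level 6 to a chunked dataset creation property list; the chunk dimensions and level are illustrative and error checking is omitted: -
    -hid_t    dcpl;
    -hsize_t  chunk_dims[2] = {64, 64};
    -unsigned cd_values[1]  = {6};          /* gzip compression level */
    -
    -dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -H5Pset_chunk(dcpl, 2, chunk_dims);     /* filters require chunked layout */
    -H5Pset_filter(dcpl, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, cd_values);
    - -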
    Fortran90 Interface: h5pset_filter_f -
    -
    -SUBROUTINE h5pset_filter_f(prp_id, filter, flags, cd_nelmts, cd_values,  hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER, INTENT(IN) :: filter         ! Filter to be added to the pipeline
    -  INTEGER, INTENT(IN) :: flags          ! Bit vector specifying certain 
    -                                        ! general properties of the filter
    -  INTEGER(SIZE_T), INTENT(IN) :: cd_nelmts        
    -                                        ! Number of elements in cd_values
    -  INTEGER, DIMENSION(*), INTENT(IN) :: cd_values  
    -                                        ! Auxiliary data for the filter
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_filter_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_filter_callback -
    Signature: -
    herr_t H5Pset_filter_callback(hid_t plist, - H5Z_filter_func_t func, - void *op_data) -
    Purpose: -
    Sets user-defined filter callback function. -
    Description: -
    H5Pset_filter_callback sets the user-defined - filter callback function func in the - dataset transfer property list plist. -

    - The parameter op_data is a pointer to user-defined - input data for the callback function and will be passed through - to the callback function. -

    - The callback function func defines the actions - an application is to take when a filter fails. - The function prototype is as follows: -

    - typedef H5Z_cb_return_t (H5Z_filter_func_t) - (H5Z_filter_t filter, - void *buf, - size_t buf_size, - void *op_data) - -

    - where filter indicates which filter has failed, - buf and buf_size are used to pass in - the failed data, - and op_data is the required input data for this - callback function. -

    - Valid callback function return values are - H5Z_CB_FAIL and H5Z_CB_CONT.   -

    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Dataset transfer property list identifier.
      H5Z_filter_func_t func    IN: User-defined filter callback function.
      void *op_dataIN: User-defined input data for the callback function.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
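    Example: -
    A brief sketch of a callback that lets I/O continue when a filter fails; the function name my_filter_cb is illustrative and error checking is omitted: -
    -H5Z_cb_return_t my_filter_cb(H5Z_filter_t filter, void *buf,
    -                             size_t buf_size, void *op_data)
    -{
    -    return H5Z_CB_CONT;                /* skip the failed filter and continue the I/O */
    -}
    -
    -/* in the application code */
    -hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    -H5Pset_filter_callback(dxpl, my_filter_cb, NULL);
    - -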
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pset_fletcher32 -
    Signature: -
    herr_t H5Pset_fletcher32(hid_t plist) -
    Purpose: -
    Sets up use of the Fletcher32 checksum filter. -
    Description: -
    H5Pset_fletcher32 sets the Fletcher32 checksum filter - in the dataset creation property list plist.   -
    Note: -
    The initial error detection implementation supports - error detection for chunked datasets only. -
    Parameters: -
      - - - -
      hid_t plist    IN: Dataset creation property list identifier.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
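    Example: -
    A brief sketch that enables the Fletcher32 checksum filter on a chunked dataset creation property list; the chunk size is illustrative and error checking is omitted: -
    -hid_t   dcpl;
    -hsize_t chunk_dims[1] = {1024};
    -
    -dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -H5Pset_chunk(dcpl, 1, chunk_dims);     /* error detection is supported for chunked datasets only */
    -H5Pset_fletcher32(dcpl);
    - -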
    Fortran90 Interface: h5pset_fletcher32_f -
    -
    -SUBROUTINE h5pset_fletcher32_f(prp_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Dataset creation property list 
    -                                        ! identifier 
    -  INTEGER, INTENT(OUT)       :: hdferr  ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_fletcher32_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_gc_references -
    Signature: -
    herr_t H5Pset_gc_references(hid_t plist, - unsigned gc_ref - ) -
    Purpose: -
    Sets garbage collecting references flag. -
    Description: -
    H5Pset_gc_references sets the flag for - garbage collecting references for the file. -

    - Dataset region references and other reference types use space - in an HDF5 file's global heap. If garbage collection is on - and the user passes in an uninitialized value in a reference structure, - the heap might get corrupted. When garbage collection is off, however, - and the user re-uses a reference, the previous heap block will be - orphaned and not returned to the free heap space. -

    - When garbage collection is on, the user must initialize the - reference structures to 0 or risk heap corruption. -

    - The default value for garbage collecting references is off. -

    Parameters: -
      - - - - - - -
      hid_t plistIN: File access property list identifier.
      unsigned gc_ref    IN: Flag setting reference garbage collection to - on (1) or off (0).
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_gc_references_f -
    -
    -SUBROUTINE h5pset_gc_references_f (prp_id, gc_reference, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(IN) :: gc_reference  ! Flag for garbage collecting
    -                                       ! references for the file
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_gc_references_f
    -	
    - - -
    - - - - -
    -
    -
    Name: H5Pset_hyper_vector_size -
    Signature: -
    herr_t H5Pset_hyper_vector_size(hid_t dxpl_id, - size_t vector_size - ) -
    Purpose: -
    Sets number of I/O vectors to be read/written in hyperslab I/O. -
    Description: -
    H5Pset_hyper_vector_size sets the number of - I/O vectors to be accumulated in memory before being issued - to the lower levels of the HDF5 library for reading or writing the - actual data. -

    - The I/O vectors are hyperslab offset and length pairs - and are generated during hyperslab I/O. -

    - The number of I/O vectors is passed in vector_size - to be set in the dataset transfer property list dxpl_id. - vector_size must be greater than 1 (one). -

    - H5Pset_hyper_vector_size is an I/O optimization function; - increasing vector_size should provide better performance, - but the library will use more memory during hyperslab I/O. - The default value of vector_size is 1024. -

    Parameters: -
      - - - - - - -
      hid_t dxpl_idIN: Dataset transfer property list identifier.
      size_t vector_size    IN: Number of I/O vectors to accumulate in memory for I/O operations. - Must be greater than 1 (one). Default value: 1024.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
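    Example: -
    A brief sketch that raises the I/O vector count on a dataset transfer property list; the value 2048 is illustrative and error checking is omitted: -
    -hid_t dxpl;
    -
    -dxpl = H5Pcreate(H5P_DATASET_XFER);
    -H5Pset_hyper_vector_size(dxpl, 2048);  /* accumulate up to 2048 offset/length pairs */
    -/* pass dxpl to H5Dread or H5Dwrite for hyperslab I/O */
    - -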
    Fortran90 Interface: h5pset_hyper_vector_size_f -
    -
    -SUBROUTINE h5pset_hyper_vector_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! Dataset transfer property list 
    -                                         ! identifier
    -  INTEGER(SIZE_T), INTENT(IN) :: size    ! Vector size 
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_hyper_vector_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_istore_k -
    Signature: -
    herr_t H5Pset_istore_k(hid_t plist, - unsigned ik - ) -
    Purpose: -
    Sets the size of the parameter used to control the - B-trees for indexing chunked datasets. -
    Description: -
    H5Pset_istore_k sets the size of the parameter - used to control the B-trees for indexing chunked datasets. - This function is only valid for file creation property lists. -

    - ik is one half the rank of a tree that stores - chunked raw data. On average, such a tree will be 75% full, - or have an average rank of 1.5 times the value of - ik. -

    Parameters: -
      - - - - - - -
      hid_t plist    IN: Identifier of property list to query.
      unsigned ikIN: 1/2 rank of chunked storage B-tree.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_istore_k_f -
    -
    -SUBROUTINE h5pset_istore_k_f (prp_id, ik, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(IN) :: ik            ! 1/2 rank of chunked storage B-tree
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_istore_k_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_layout -
    Signature: -
    herr_t H5Pset_layout(hid_t plist, - H5D_layout_t layout - ) -
    Purpose: -
    Sets the type of storage used to store the raw data for a dataset. -
    Description: -
    H5Pset_layout sets the type of storage used to store the - raw data for a dataset. - This function is only valid for dataset creation property lists. -

    - Valid values for layout are: -

      -
      H5D_COMPACT -
      Store raw data in the dataset object header in file. - This should only be used for very small amounts of raw - data. - The current limit is approximately 64K (HDF5 Release 1.6). -
      H5D_CONTIGUOUS -
      Store raw data separately from the object header in one - large chunk in the file. -
      H5D_CHUNKED -
      Store raw data separately from the object header as - chunks of data in separate locations in the file. -
    -

    - Note that a compact storage layout may affect writing data to - the dataset with parallel applications. See note in - H5Dwrite - documentation for details. -

    Parameters: -
      - - - - - - -
      hid_t plistIN: Identifier of property list to query.
      H5D_layout_t layout    IN: Type of storage layout for raw data.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
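    Example: -
    A brief sketch that stores a very small dataset compactly in its object header; the file and dataset names and sizes are illustrative and error checking is omitted: -
    -hid_t   file, dcpl, space, dset;
    -hsize_t dims[1] = {100};
    -
    -file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    -dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    -H5Pset_layout(dcpl, H5D_COMPACT);      /* raw data kept in the dataset object header */
    -space = H5Screate_simple(1, dims, NULL);
    -dset  = H5Dcreate(file, "small_data", H5T_NATIVE_INT, space, dcpl);
    - -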
    Fortran90 Interface: h5pset_layout_f -
    -
    -SUBROUTINE h5pset_layout_f (prp_id, layout, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(IN) :: layout        ! Type of storage layout for raw data
    -                                       ! Possible values are:
    -                                       !    H5D_COMPACT_F
    -                                       !    H5D_CONTIGUOUS_F
    -                                       !    H5D_CHUNKED_F
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_layout_f
    -	
    - - -
    - - - - - - - -
    -
    -
    Name: H5Pset_meta_block_size -
    Signature: -
    herr_t H5Pset_meta_block_size( - hid_t fapl_id, - hsize_t size - ) -
    Purpose: -
    Sets the minimum metadata block size. -
    Description: -
    H5Pset_meta_block_size sets the - minimum size, in bytes, of metadata block allocations when - H5FD_FEAT_AGGREGATE_METADATA is set by a VFL driver. -

    - Each raw metadata block is initially allocated to be of the - given size. Specific metadata objects (e.g., object headers, - local heaps, B-trees) are then sub-allocated from this block. -

    - The default setting is 2048 bytes, meaning that the library - will attempt to aggregate metadata in at least 2K blocks in the file. - Setting the value to 0 (zero) with this function - will turn off metadata aggregation, even if the VFL driver attempts - to use the metadata aggregation strategy. -

    - Metadata aggregation reduces the number of small data objects - in the file that would otherwise be required for metadata. - The aggregated block of metadata is usually written in a - single write action and always in a contiguous block, - potentially significantly improving library and application - performance. -

    Parameters: -
      - - - - - - -
      hid_t fapl_id    IN: File access property list identifier.
      hsize_t sizeIN: Minimum size, in bytes, of metadata block allocations.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
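    Example: -
    A brief sketch that raises the metadata aggregation threshold to 8 KB in a file access property list; the value is illustrative and error checking is omitted: -
    -hid_t fapl;
    -
    -fapl = H5Pcreate(H5P_FILE_ACCESS);
    -H5Pset_meta_block_size(fapl, 8192);    /* aggregate metadata in at least 8 KB blocks */
    - -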
    Fortran90 Interface: h5pset_meta_block_size_f -
    -
    -SUBROUTINE h5pset_meta_block_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! File access property list 
    -                                         ! identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: size   ! Metadata block size
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_meta_block_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_multi_type -
    Signature: -
    herr_t H5Pset_multi_type ( - hid_t fapl_id, - H5FD_mem_t type - ) -
    Purpose: -
    Sets data type property for MULTI driver. -
    Description: -
    H5Pset_multi_type sets the data type property in the - file access or data transfer property list fapl_id. - This enables a user application to specify the type of data the - application wishes to access so that the application - can retrieve a file handle for low-level access to the particular member - of a set of MULTI files in which that type of data is stored. - The file handle is retrieved with a separate call - to H5Fget_vfd_handle - (or, in special circumstances, to H5FDget_vfd_handle; - see Virtual File Layer and List of VFL Functions - in HDF5 Technical Notes). -

    - The type of data specified in type may be one of the following: - - - - - - - - -
         - H5FD_MEM_DEFAULT   - - The default file memory allocation type. -
      - H5FD_MEM_SUPER - - File memory reserved for superblock data. -
      - H5FD_MEM_BTREE - - File memory reserved for B-tree data. -
      - H5FD_MEM_DRAW - - File memory reserved for raw dataset data. -
      - H5FD_MEM_GHEAP - - File memory reserved for global heap data. -
      - H5FD_MEM_LHEAP - - File memory reserved for local heap data. -
      - H5FD_MEM_OHDR - - File memory reserved for object header data. -
    -

    - Use of this function is only appropriate for an HDF5 file written - as a set of files with the MULTI file driver. -

    Parameters: -
      - - - - - - -
      hid_t fapl_idIN: File access property list or data transfer property list identifier.
      H5FD_mem_t type    IN: Type of data.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
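    Example: -
    A brief sketch that retrieves the low-level handle of the raw data member of a MULTI file set; it assumes file is the identifier of a file already opened with the MULTI driver, and error checking is omitted: -
    -hid_t  fapl;
    -void  *handle;
    -
    -fapl = H5Pcreate(H5P_FILE_ACCESS);
    -H5Pset_multi_type(fapl, H5FD_MEM_DRAW);    /* request the raw data member */
    -H5Fget_vfd_handle(file, fapl, &handle);    /* file: open MULTI-driver file identifier */
    - -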
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Pset_preserve -
    Signature: -
    herr_t H5Pset_preserve(hid_t plist, - hbool_t status - ) -
    Purpose: -
    Sets the dataset transfer property list status to TRUE or FALSE. -
    Description: -
    H5Pset_preserve sets the - dataset transfer property list status to TRUE or FALSE. -

    - When reading or writing compound data types and the - destination is partially initialized and the read/write is - intended to initialize the other members, one must set this - property to TRUE. Otherwise the I/O pipeline treats the - destination datapoints as completely uninitialized. -

    Parameters: -
      - - - - - - -
      hid_t plistIN: Identifier for the dataset transfer property list.
      hbool_t status    IN: Status for the dataset transfer property list - (TRUE/FALSE).
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_preserve_f -
    -
    -SUBROUTINE h5pset_preserve_f(prp_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id   ! Dataset transfer property 
    -                                         ! list identifier 
    -  LOGICAL, INTENT(IN)        :: flag     ! Status for the dataset 
    -                                         ! transfer property list 
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_preserve_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_shuffle -
    Signature: -
    herr_t H5Pset_shuffle(hid_t plist_id) -
    Purpose: -
    Sets up use of the shuffle filter. -
    Description: -
    H5Pset_shuffle sets the shuffle filter, - H5Z_FILTER_SHUFFLE, - in the dataset creation property list plist_id.   -

    - The shuffle filter de-interlaces - a block of data by reordering the bytes. - All the bytes from one consistent byte position of - each data element are placed together in one block; - all bytes from a second consistent byte position of - each data element are placed together in a second block; etc. - For example, given three data elements of a 4-byte datatype - stored as 012301230123, - shuffling will re-order the data as 000111222333. - This can be a valuable step in an effective compression - algorithm because the bytes in each byte position are often - closely related to each other and putting them together - can increase the compression ratio. -

    - As implied above, the primary value of the shuffle filter - lies in its coordinated use with a compression filter; - it does not provide data compression when used alone. - When the shuffle filter is applied to a dataset - immediately prior to the use of a compression filter, - the compression ratio achieved is often superior to that - achieved by the use of a compression filter without - the shuffle filter. -

    Parameters: -
      - - - -
      hid_t plist_id    IN: Dataset creation property list identifier.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
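    Example: -
    A brief sketch pairing the shuffle filter with gzip compression on a chunked dataset creation property list, in keeping with the coordinated use described above; the chunk dimensions and compression level are illustrative and error checking is omitted: -
    -hid_t   dcpl;
    -hsize_t chunk_dims[2] = {64, 64};
    -
    -dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -H5Pset_chunk(dcpl, 2, chunk_dims);
    -H5Pset_shuffle(dcpl);                  /* reorder bytes before compressing */
    -H5Pset_deflate(dcpl, 6);               /* gzip compression applied after the shuffle */
    - -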
    Fortran90 Interface: h5pset_shuffle_f -
    -
    -SUBROUTINE h5pset_shuffle_f(prp_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id      ! Property list identifier 
    -  INTEGER, INTENT(OUT)       :: hdferr      ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_shuffle_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_sieve_buf_size -
    Signature: -
    herr_t H5Pset_sieve_buf_size( - hid_t fapl_id, - hsize_t size - ) -
    Purpose: -
    Sets the maximum size of the data sieve buffer. -
    Description: -
    H5Pset_sieve_buf_size sets size, - the maximum size in bytes of the data sieve buffer, which is - used by file drivers that are capable of using data sieving. -

    - The data sieve buffer is used when performing I/O on datasets - in the file. Using a buffer that is large enough to hold - several pieces of the dataset being read in for - hyperslab selections can improve performance substantially. -

    - The default value is set to 64KB, indicating that file I/O for - raw data reads and writes will occur in at least 64KB blocks. - Setting the value to 0 with this API function will turn off the - data sieving, even if the VFL driver attempts to use that strategy. -

    Parameters: -
      - - - - - - -
      hid_t fapl_id    IN: File access property list identifier.
      hsize_t sizeIN: Maximum size, in bytes, of data sieve buffer.
    -
    Returns: -
    Returns a non-negative value if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5pset_sieve_buf_size_f -
    -
    -SUBROUTINE h5pset_sieve_buf_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! File access property list 
    -                                         ! identifier
    -  INTEGER(SIZE_T), INTENT(IN) :: size    ! Sieve buffer size 
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_sieve_buf_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_sizes -
    Signature: -
    herr_t H5Pset_sizes(hid_t plist, - size_t sizeof_addr, - size_t sizeof_size - ) -
    Purpose: -
    Sets the byte size of the offsets and lengths used to address objects - in an HDF5 file. -
    Description: -
    H5Pset_sizes sets the byte size of the offsets - and lengths used to address objects in an HDF5 file. - This function is only valid for file creation property lists. - Passing in a value of 0 for one of the sizeof_... - parameters retains the current value. - The default value for both values is the same as - sizeof(hsize_t) in the library (normally 8 bytes). - Valid values currently are 2, 4, 8 and 16. -
    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Identifier of property list to modify.
      size_t sizeof_addr    IN: Size of an object offset in bytes.
      size_t sizeof_sizeIN: Size of an object length in bytes.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5pset_sizes_f -
    -
    -SUBROUTINE h5pset_sizes_f (prp_id, sizeof_addr, sizeof_size, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id       ! Property list identifier
    -  INTEGER(SIZE_T), INTENT(IN) :: sizeof_addr ! Size of an object offset 
    -                                             ! in bytes
    -  INTEGER(SIZE_T), INTENT(IN) :: sizeof_size ! Size of an object length 
    -                                             ! in bytes
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -                                             ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_sizes_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_small_data_block_size -
    Signature: -
    herr_t H5Pset_small_data_block_size(hid_t fapl_id, - hsize_t size - ) -
    Purpose: -
    Sets the size of a contiguous block reserved for small data. -
    Description: -
    H5Pset_small_data_block_size reserves blocks of - size bytes for the contiguous storage of the raw data - portion of small datasets. - The HDF5 library then writes the raw data from small datasets - to this reserved space, thus reducing unnecessary discontinuities - within blocks of meta data and improving IO performance. -

    - A small data block is actually allocated the first time a - qualifying small dataset is written to the file. - Space for the raw data portion of this small dataset is suballocated - within the small data block. - The raw data from each subsequent small dataset is also written to - the small data block until it is filled; additional small data blocks - are allocated as required. -

    - The HDF5 library employs an algorithm that determines whether - IO performance is likely to benefit from the use of this mechanism - with each dataset as storage space is allocated in the file. - A larger size will result in this mechanism being - employed with larger datasets. -

    - The small data block size is set as an allocation property in the - file access property list identified by fapl_id. -

    - Setting size to zero (0) disables the - small data block mechanism. -

    Parameters: -
      - - - - - - -
      hid_t fapl_id    IN: File access property list identifier.
      hsize_t sizeIN: Maximum size, in bytes, of the small data block. -
      - The default size is 2048.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise a negative value. -
    Fortran90 Interface: h5pset_small_data_block_size_f -
    -
    -SUBROUTINE h5pset_small_data_block_size_f(plist_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: plist_id ! File access
    -                                         ! property list identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: size   ! Small raw data block size
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_small_data_block_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_sym_k -
    Signature: -
    herr_t H5Pset_sym_k(hid_t plist, - unsigned ik, - unsigned lk - ) -
    Purpose: -
    Sets the size of parameters used to control the symbol table nodes. -
    Description: -
    H5Pset_sym_k sets the size of parameters used to - control the symbol table nodes. This function is only valid - for file creation property lists. Passing in a value of 0 for - one of the parameters retains the current value. -

    - ik is one half the rank of a tree that stores a symbol - table for a group. Internal nodes of the symbol table are on - average 75% full. That is, the average rank of the tree is - 1.5 times the value of ik. -

    - lk is one half of the number of symbols that can - be stored in a symbol table node. A symbol table node is the - leaf of a symbol table tree which is used to store a group. - When symbols are inserted randomly into a group, the group's - symbol table nodes are 75% full on average. That is, they - contain 1.5 times the number of symbols specified by - lk. -

    Parameters: -
      - - - - - - - - - -
      hid_t plist    IN: Identifier for property list to query.
      unsigned ikIN: Symbol table tree rank.
      unsigned lkIN: Symbol table node size.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
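    Example: -
    A brief sketch that tunes the symbol table parameters, together with the chunked storage B-tree parameter set by H5Pset_istore_k, in a file creation property list; the values and file name are illustrative and error checking is omitted: -
    -hid_t fcpl, fid;
    -
    -fcpl = H5Pcreate(H5P_FILE_CREATE);
    -H5Pset_sym_k(fcpl, 32, 8);             /* symbol table tree rank and node size */
    -H5Pset_istore_k(fcpl, 64);             /* chunked storage B-tree half-rank */
    -fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
    - -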
    Fortran90 Interface: h5pset_sym_k_f -
    -
    -SUBROUTINE h5pset_sym_k_f (prp_id, ik, lk, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id ! Property list identifier
    -  INTEGER, INTENT(IN) :: ik            ! Symbol table tree rank
    -  INTEGER, INTENT(IN) :: lk            ! Symbol table node size
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_sym_k_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_szip -
    Signature: -
    herr_t H5Pset_szip(hid_t plist, - unsigned int options_mask, - unsigned int pixels_per_block) -
    Purpose: -
    Sets up use of the SZIP compression filter. -
    Description: -
    H5Pset_szip sets an SZIP compression filter, - H5Z_FILTER_SZIP, for a dataset. - SZIP is a compression method designed for use with scientific data. -

    - Before proceeding, be aware that there are factors that affect - your rights and ability to use SZIP compression. - See the documents at - SZIP Compression in HDF5 - for important information regarding terms of use and - the SZIP copyright notice, - for further discussion of SZIP compression in HDF5, - and for a list of SZIP-related references. - -

    - In the text below, the term pixel refers to - an HDF5 data element. - This terminology derives from SZIP compression's use with image data, - where pixel referred to an image pixel. -

    - The SZIP bits_per_pixel value (see Notes, below) - is automatically set, based on the HDF5 datatype. - SZIP can be used with atomic datatypes that may have size - of 8, 16, 32, or 64 bits. - Specifically, a dataset with a datatype that is - 8-, 16-, 32-, or 64-bit - signed or unsigned integer; - char; or - 32- or 64-bit float - can be compressed with SZIP. - See Notes, below, for further discussion of - the SZIP bits_per_pixel setting. - -

    - SZIP compression cannot be applied to - compound datatypes, - array datatypes, - variable-length datatypes, - enumerations, or - any other user-defined datatypes. - If an SZIP filter is set up for a dataset containing a non-allowed - datatype, H5Pset_szip will succeed but the subsequent call - to H5Dcreate - will fail; - the conflict is detected only when the property list is used. - - -

    - SZIP options are passed in an options mask, options_mask, - as follows. -

    - - - - - -
    - Option - Description - (The two options are mutually exclusive; select one.) -
    - H5_SZIP_EC_OPTION_MASK   - - Selects the entropy coding method. -
    - H5_SZIP_NN_OPTION_MASK - - Selects the nearest neighbor coding method. -
    -
    - The following guidelines can be used in determining - which option to select: -
      -
    • The entropy coding method, the EC option specified by - H5_SZIP_EC_OPTION_MASK, is best suited for - data that has been processed. - The EC method works best for small numbers. -
    • The nearest neighbor coding method, the NN option - specified by H5_SZIP_NN_OPTION_MASK, - preprocesses the data and then applies the EC method described above. -
    - Other factors may affect results, but the above criteria - provide a good starting point for optimizing data compression. - -

    - SZIP compresses data block by block, with a user-tunable block size. - This block size is passed in the parameter - pixels_per_block and must be even and not greater than 32, - with typical values being 8, 10, - 16, or 32. - This parameter affects compression ratio; - the more pixel values vary, the smaller this number should be to - achieve better performance. -

    - In HDF5, compression can be applied only to chunked datasets. - If pixels_per_block is bigger than the total - number of elements in a dataset chunk, - H5Pset_szip will succeed but the subsequent call to - H5Dcreate - will fail; the conflict is detected only when the property list - is used. -

    - To achieve optimal performance for SZIP compression, - it is recommended that a chunk's fastest-changing dimension - be equal to N times pixels_per_block - where N is the maximum number of blocks per scan line - allowed by the SZIP library. - In the current version of SZIP, N is set to 128. -

    - H5Pset_szip will fail if SZIP encoding is - disabled in the available copy of the SZIP library. - - H5Zget_filter_info can be employed - to avoid such a failure. -

    Parameters: -
      - - - - - - - - - -
      hid_t plistIN: Dataset creation property list - identifier.
      unsigned int options_maskIN: A bit-mask conveying the desired SZIP options. - Valid values are H5_SZIP_EC_OPTION_MASK - and H5_SZIP_NN_OPTION_MASK.
      unsigned int pixels_per_block    IN: The number of pixels or data elements in each data block.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
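    Example: -
    A brief sketch that applies SZIP compression with the nearest neighbor option and 16 pixels per block to a chunked dataset creation property list; the chunk dimensions are illustrative and error checking is omitted: -
    -hid_t   dcpl;
    -hsize_t chunk_dims[2] = {128, 128};
    -
    -dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -H5Pset_chunk(dcpl, 2, chunk_dims);     /* SZIP compression requires chunked layout */
    -H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 16);
    - -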
    Notes: -
    The following notes are of interest primarily to those who have - used SZIP compression outside of the HDF5 context. -

    - In non-HDF5 applications, SZIP typically requires that the - user application supply additional parameters: -

      -
    • pixels_in_object, - the number of pixels in the object to be compressed -
    • bits_per_pixel, - the number of bits per pixel -
    • pixels_per_scanline, - the number of pixels per scan line -
    -

    - These values need not be independently supplied in the HDF5 - environment as they are derived from the datatype and dataspace, - which are already known. - In particular, HDF5 sets - pixels_in_object to the number of elements in a chunk - and bits_per_pixel to the size of the element or - pixel datatype. - The following algorithm is used to set - pixels_per_scanline: -

      -
    • If the size of a chunk's fastest-changing dimension, - size, is greater than 4K, - set pixels_per_scanline to - 128 times pixels_per_block. -
    • If size is less than 4K - but greater than pixels_per_block, - set pixels_per_scanline to the minimum of - size and 128 times pixels_per_block. -
    • If size is less than pixels_per_block - but greater than the number of elements in the chunk, - set pixels_per_scanline to the minimum of - the number of elements in the chunk and - 128 times pixels_per_block. -
    - -

    - The HDF5 datatype may have precision that is less than the - full size of the data element, e.g., an 11-bit integer can be - defined using - H5Tset_precision. - To a certain extent, SZIP can take advantage of the - precision of the datatype to improve compression: -

    • - If the HDF5 datatype size is 24-bit or less and - the offset of the bits in the HDF5 datatype is zero - (see H5Tset_offset - or H5Tget_offset), - the data is in the lowest N bits of the data element. - In this case, the SZIP bits_per_pixel - is set to the precision - of the HDF5 datatype. -
    • - If the offset is not zero, the SZIP bits_per_pixel - will be set to the number of bits in the full size of the data - element. -
    • - If the HDF5 datatype precision is 25-bit to 32-bit, - the SZIP bits_per_pixel will be set to 32. -
    • - If the HDF5 datatype precision is 33-bit to 64-bit, - the SZIP bits_per_pixel will be set to 64. -
    - -

    - HDF5 always modifies the options mask provided by the user - to set up usage of RAW_OPTION_MASK, - ALLOW_K13_OPTION_MASK, and one of - LSB_OPTION_MASK or MSB_OPTION_MASK, - depending on endianness of the datatype. - -

    Fortran90 Interface: h5pset_szip_f -
    -
    -SUBROUTINE h5pset_szip_f(prp_id, options_mask, pixels_per_block, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id     
    -                                   ! Dataset creation property list identifier 
    -  INTEGER, INTENT(IN) :: options_mask      
    -                                   ! A bit-mask conveying the desired
    -                                   ! SZIP options
    -                                   ! Current valid values in Fortran are:
    -                                   !    H5_SZIP_EC_OM_F
    -                                   !    H5_SZIP_NN_OM_F
    -  INTEGER, INTENT(IN) :: pixels_per_block  
    -                                   ! The number of pixels or data elements 
    -                                   ! in each data block
    -  INTEGER, INTENT(OUT)  :: hdferr  ! Error code
    -                                   ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_szip_f
    -	
    - - - - -
    - - - -
    -
    -
    Name: H5Pset_userblock -
    Signature: -
    herr_t H5Pset_userblock(hid_t plist, - hsize_t size - ) -
    Purpose: -
    Sets user block size. -
    Description: -
    H5Pset_userblock sets the user block size of a - file creation property list. - The default user block size is 0; it may be set to any - power of 2 equal to 512 or greater (512, 1024, 2048, etc.). -
    Parameters: -
      - - - - - - -
      hid_t plistIN: Identifier of property list to modify.
      hsize_t size    IN: Size of the user-block in bytes.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
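    Example: -
    A brief sketch that reserves a 1024-byte user block at the start of a new file; the file name is illustrative and error checking is omitted: -
    -hid_t fcpl, fid;
    -
    -fcpl = H5Pcreate(H5P_FILE_CREATE);
    -H5Pset_userblock(fcpl, 1024);          /* must be 0 or a power of 2 of at least 512 */
    -fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
    - -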
    Fortran90 Interface: h5pset_userblock_f -
    -
    -SUBROUTINE h5pset_userblock_f (prp_id, size, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: prp_id  ! Property list identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: size  ! Size of the user-block in bytes
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5pset_userblock_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Pset_vlen_mem_manager -
    Signature: -
    herr_t H5Pset_vlen_mem_manager(hid_t plist, - H5MM_allocate_t alloc, - void *alloc_info, - H5MM_free_t free, - void *free_info - ) -
    Purpose: -
    Sets the memory manager for variable-length datatype allocation in - H5Dread and H5Dvlen_reclaim. -
    Description: -
    H5Pset_vlen_mem_manager sets the memory manager for - variable-length datatype allocation in H5Dread - and for memory deallocation in H5Dvlen_reclaim. -

    - The alloc and free parameters - identify the memory management routines to be used. - If the user has defined custom memory management routines, - alloc and/or free should be set to make - those routine calls (i.e., the name of the routine is used as - the value of the parameter); - if the user prefers to use the system's malloc - and/or free, the alloc and - free parameters, respectively, should be set to - NULL. -

    - The prototypes for these user-defined functions would appear as follows: -
         - typedef void *(*H5MM_allocate_t)(size_t size, - void *alloc_info) ; - -
         - typedef void (*H5MM_free_t)(void *mem, - void *free_info) ; -
    - The alloc_info and free_info parameters - can be used to pass along any required information to - the user's memory management routines. -

    - In summary, if the user has defined custom memory management - routines, the name(s) of the routines are passed in the - alloc and free parameters and the - custom routines' parameters are passed in the - alloc_info and free_info parameters. - If the user wishes to use the system malloc and - free functions, the alloc and/or - free parameters are set to NULL - and the alloc_info and free_info - parameters are ignored. -

    Parameters: -
      - - - - - - - - - - - - - - - -
  hid_t plist    IN: Identifier for the dataset transfer property list.
  H5MM_allocate_t alloc    IN: User's allocate routine, or NULL for system malloc.
  void *alloc_info    IN: Extra parameter for user's allocation routine. Contents are ignored if the preceding parameter is NULL.
  H5MM_free_t free    IN: User's free routine, or NULL for system free.
  void *free_info    IN: Extra parameter for user's free routine. Contents are ignored if the preceding parameter is NULL.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
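As an illustration of the prototypes above, a hedged C sketch of custom routines attached to a dataset transfer property list; the names my_vlen_alloc/my_vlen_free and the pass-through allocation counter are invented for this example:

    #include <stdlib.h>
    #include "hdf5.h"

    /* Illustrative user-defined routines matching the H5MM_allocate_t /
     * H5MM_free_t prototypes shown above. */
    static void *my_vlen_alloc(size_t size, void *alloc_info)
    {
        size_t *counter = (size_t *)alloc_info;   /* extra info passed through */
        if (counter)
            (*counter)++;
        return malloc(size);
    }

    static void my_vlen_free(void *mem, void *free_info)
    {
        (void)free_info;
        free(mem);
    }

    /* Attach the routines to a dataset transfer property list. */
    hid_t make_vlen_xfer_plist(size_t *alloc_counter)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
        if (H5Pset_vlen_mem_manager(dxpl, my_vlen_alloc, alloc_counter,
                                    my_vlen_free, NULL) < 0) {
            H5Pclose(dxpl);
            return -1;
        }
        return dxpl;   /* use with H5Dread / H5Dvlen_reclaim, then H5Pclose */
    }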
    - - - -
    -
    -
    Name: H5Punregister - -
    Signature: -
    herr_t H5Punregister( - H5P_class_t class, - const char *name - ) - -
    Purpose: -
    Removes a property from a property list class. - -
    Description: -
    H5Punregister removes a property from a - property list class. - -

    - Future property lists created of that class will not contain - this property; - existing property lists containing this property are not affected. - -

    Parameters: -
      - - - - - - -
  H5P_class_t class    IN: Property list class from which to remove the permanent property.
  const char *name    IN: Name of property to remove.
    - -
    Returns: -
    Success: a non-negative value -
    Failure: a negative value -
    Fortran90 Interface: h5punregister_f -
    -
    -SUBROUTINE h5punregister_f(class, name, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: class  ! Property list class identifier
    -  CHARACTER(LEN=*), INTENT(IN) :: name ! Name of property to remove
    -  INTEGER, INTENT(OUT) :: hdferr       ! Error code
    -                                       ! 0 on success and -1 on failure
    -END SUBROUTINE h5punregister_f
    -	
    - - -
diff --git a/doc/html/RM_H5R.html b/doc/html/RM_H5R.html
deleted file mode 100644
index 2113732..0000000
--- a/doc/html/RM_H5R.html
+++ /dev/null
@@ -1,543 +0,0 @@
-HDF5/H5R API Specification

    H5R: Reference Interface

    -
    - -

    Reference API Functions

    - -The Reference interface allows the user to create references -to specific objects and data regions in an HDF5 file. - -

    -The C Interfaces:
  • H5Rcreate
  • H5Rdereference
  • H5Rget_obj_type
  • H5Rget_region
    -
    -The FORTRAN90 Interfaces:
    -In general, each FORTRAN90 subroutine performs exactly the same task as the corresponding C function.
  • h5rcreate_f
  • h5rdereference_f
  • h5rget_object_type_f
  • h5rget_region_f


    -
    -
    Name: H5Rcreate -
    Signature: -
    herr_t H5Rcreate(void *ref, - hid_t loc_id, - const char *name, - H5R_type_t ref_type, - hid_t space_id - ) -
    Purpose: -
    Creates a reference. -
    Description: -
    H5Rcreate creates the reference, ref, - of the type specified in ref_type, pointing to - the object name located at loc_id. -

    - The HDF5 library maps the void type specified above - for ref to the type specified in ref_type, - which will be one of those appearing in the first column of - the following table. - The second column of the table lists the HDF5 constant associated - with each reference type. -

    - - - - - - - -
    hdset_reg_ref_t  H5R_DATASET_REGION  Dataset region reference
    hobj_ref_tH5R_OBJECTObject reference
    -
    -

    - The parameters loc_id and name are - used to locate the object. -

    - The parameter space_id identifies the region - to be pointed to for a dataset region reference. - This parameter is unused with object references. -

    Parameters: -
      - - - - - - - - - - - - - - - -
  void *ref    OUT: Reference created by the function call.
  hid_t loc_id    IN: Location identifier used to locate the object being pointed to.
  const char *name    IN: Name of object at location loc_id.
  H5R_type_t ref_type    IN: Type of reference.
  hid_t space_id    IN: Dataspace identifier with selection. Used for dataset region references.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5rcreate_f - -

    To create an object reference -

    -
    -SUBROUTINE h5rcreate_f(loc_id, name, ref, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: loc_id     ! Location identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name     ! Name of the object at location 
    -                                           ! specified by loc_id identifier 
    -  TYPE(hobj_ref_t_f), INTENT(OUT) :: ref   ! Object reference 
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code 
    -
    -END SUBROUTINE h5rcreate_f
    -	
    - - -
    To create a region reference -
    -
    -SUBROUTINE h5rcreate_f(loc_id, name, space_id, ref, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: loc_id        ! Location identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name        ! Name of the dataset at location 
    -                                              ! specified by loc_id identifier 
    -  INTEGER(HID_T), INTENT(IN) :: space_id      ! Dataset's dataspace identifier 
    -  TYPE(hdset_reg_ref_t_f), INTENT(OUT) :: ref ! Dataset region reference 
    -  INTEGER, INTENT(OUT) :: hdferr              ! Error code 
    -
    -END SUBROUTINE h5rcreate_f
    -	
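A brief C sketch of both reference types; the object path "/group_a", the dataset path "/dset", and the identifiers file_id and space_id are illustrative:

    #include "hdf5.h"

    /* Store one object reference and one dataset region reference. */
    herr_t make_references(hid_t file_id, hid_t space_id,
                           hobj_ref_t *obj_ref, hdset_reg_ref_t *reg_ref)
    {
        /* Object reference: space_id is unused, so -1 is passed here. */
        if (H5Rcreate(obj_ref, file_id, "/group_a", H5R_OBJECT, -1) < 0)
            return -1;

        /* Region reference: space_id is the dataspace of "/dset" with the
         * desired selection already defined on it. */
        if (H5Rcreate(reg_ref, file_id, "/dset", H5R_DATASET_REGION, space_id) < 0)
            return -1;

        return 0;
    }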
    - - -
    - - - -
    -
    -
    Name: H5Rdereference -
    Signature: -
    hid_t H5Rdereference(hid_t dataset, - H5R_type_t ref_type, - void *ref - ) -
    Purpose: -
    Opens the HDF5 object referenced. -
    Description: -
    Given a reference to some object, H5Rdereference - opens that object and returns an identifier. -

    - The parameter ref_type specifies the reference type - of ref. - ref_type may contain either of the following values: -

      -
    • H5R_OBJECT (0) -
    • H5R_DATASET_REGION (1) -
    -
    Parameters: -
      - - - - - - - - - -
  hid_t dataset    IN: Dataset containing reference object.
  H5R_type_t ref_type    IN: The reference type of ref.
  void *ref    IN: Reference to open.
    -
    Returns: -
    Returns valid identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5rdereference_f - -

    To dereference an object -

    -
    -SUBROUTINE h5rdereference_f(dset_id, ref, obj_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: dset_id   ! Dataset identifier 
    -  TYPE(hobj_ref_t_f), INTENT(IN) :: ref   ! Object reference 
    -  INTEGER(HID_T), INTENT(OUT) :: obj_id   ! Object identifier 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code 
    -
    -END SUBROUTINE h5rdereference_f
    -	
    - - -
    To dereference a region -
    -
    -SUBROUTINE h5rdereference_f(dset_id, ref, obj_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: dset_id        ! Dataset identifier 
    -  TYPE(hdset_reg_ref_t_f), INTENT(IN) :: ref   ! Object reference 
    -  INTEGER(HID_T), INTENT(OUT) :: obj_id        ! Object identifier 
    -  INTEGER, INTENT(OUT) :: hdferr               ! Error code 
    -          
    -END SUBROUTINE h5rdereference_f
    -	
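A short C sketch of dereferencing an object reference read from a dataset; dset_id and ref are illustrative inputs:

    #include "hdf5.h"

    /* Open the object behind a stored object reference. */
    hid_t open_referenced_object(hid_t dset_id, hobj_ref_t *ref)
    {
        hid_t obj_id = H5Rdereference(dset_id, H5R_OBJECT, ref);
        /* obj_id is a valid identifier on success and negative on failure;
         * close it with the call matching its type (H5Dclose, H5Gclose, ...). */
        return obj_id;
    }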
    - - -
    - - - -
    -
    -
    Name: H5Rget_obj_type -
    Signature: -
    H5G_obj_t H5Rget_obj_type(hid_t id, - H5R_type_t ref_type, - void *ref - ) -
    Purpose: -
    Retrieves the type of object that an object reference points to. -
    Description: -
    Given type of object reference, ref_type, - and a reference to an object, ref, - H5Rget_obj_type - returns the type of the referenced object. -

    - Valid object reference types, to pass in as ref_type, - include the following: - - - -
         - H5R_OBJECT - Reference is an object reference. -
      - H5R_DATASET_REGION   - Reference is a dataset region reference. -
    -

    - Valid object type return values include the following: - - - - - -
         - H5G_LINK - Object is a symbolic link. -
      - H5G_GROUP - Object is a group. -
      - H5G_DATASET   - Object is a dataset. -
      - H5G_TYPE - Object is a named datatype. -
    -

    Parameters: -
      - - - - - - - - - -
  hid_t id    IN: The dataset containing the reference object, or the location identifier of the object within which that dataset is located.
  H5R_type_t ref_type    IN: Type of reference to query.
  void *ref    IN: Reference to query.
    -
    Returns: -
    Returns an object type as defined in H5Gpublic.h if successful; - otherwise returns H5G_UNKNOWN. -
    Fortran90 Interface: h5rget_object_type_f -
    -
    -SUBROUTINE h5rget_object_type_f(dset_id, ref, obj_type, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: dset_id   ! Dataset identifier 
    -  TYPE(hobj_ref_t_f), INTENT(IN) :: ref   ! Object reference 
    -  INTEGER, INTENT(OUT) :: obj_type        ! Object type  
    -                                          !     H5G_UNKNOWN_F (-1)
    -                                          !     H5G_LINK_F      0
    -                                          !     H5G_GROUP_F     1
    -                                          !     H5G_DATASET_F   2
    -                                          !     H5G_TYPE_F      3
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code 
    -
    -END SUBROUTINE h5rget_object_type_f
    -	
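A small C sketch using the return values listed above to report the referenced object's type; dset_id and ref are illustrative:

    #include <stdio.h>
    #include "hdf5.h"

    /* Report what kind of object an object reference points to. */
    void print_referenced_type(hid_t dset_id, hobj_ref_t *ref)
    {
        H5G_obj_t type = H5Rget_obj_type(dset_id, H5R_OBJECT, ref);

        switch (type) {
            case H5G_GROUP:   printf("reference points to a group\n");          break;
            case H5G_DATASET: printf("reference points to a dataset\n");        break;
            case H5G_TYPE:    printf("reference points to a named datatype\n"); break;
            case H5G_LINK:    printf("reference points to a symbolic link\n");  break;
            default:          printf("unknown object type\n");                  break;
        }
    }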
    - - -
    - - - - -
    -
    -
    Name: H5Rget_region -
    Signature: -
    hid_t H5Rget_region(hid_t dataset, - H5R_type_t ref_type, - void *ref - ) -
    Purpose: -
    Retrieves a dataspace with the specified region selected. -
    Description: -
    Given a reference to an object ref, - H5Rget_region creates a copy of the dataspace - of the dataset pointed to and defines a selection in the copy - which is the region pointed to. -

    - The parameter ref_type specifies the reference type - of ref. - ref_type may contain the following value: -

      -
    • H5R_DATASET_REGION (1) -
    -
    Parameters: -
      - - - - - - - - - -
  hid_t dataset    IN: Dataset containing reference object.
  H5R_type_t ref_type    IN: The reference type of ref.
  void *ref    IN: Reference to open.
    -
    Returns: -
    Returns a valid identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5rget_region_f -
    -
    -SUBROUTINE h5rget_region_f(dset_id, ref, space_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: dset_id       ! Dataset identifier 
    -  TYPE(hdset_reg_ref_t_f), INTENT(IN) :: ref  ! Dataset region reference 
    -  INTEGER(HID_T), INTENT(OUT) :: space_id     ! Space identifier 
    -  INTEGER, INTENT(OUT) :: hdferr              ! Error code
    - 
    -END SUBROUTINE h5rget_region_f
    -	
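A short C sketch that recovers the referenced region and counts its elements with H5Sget_select_npoints (documented in the H5S section below); dset_id and reg_ref are illustrative:

    #include "hdf5.h"

    /* Recover the dataspace selection carried by a dataset region reference. */
    hssize_t count_referenced_points(hid_t dset_id, hdset_reg_ref_t *reg_ref)
    {
        hid_t space_id = H5Rget_region(dset_id, H5R_DATASET_REGION, reg_ref);
        if (space_id < 0)
            return -1;

        /* Number of elements in the referenced region. */
        hssize_t npoints = H5Sget_select_npoints(space_id);

        H5Sclose(space_id);   /* the returned identifier must be released */
        return npoints;
    }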
    - - -
    - -
    -
diff --git a/doc/html/RM_H5S.html b/doc/html/RM_H5S.html
deleted file mode 100644
index 6f8aaba..0000000
--- a/doc/html/RM_H5S.html
+++ /dev/null
@@ -1,1884 +0,0 @@
-HDF5/H5S API Specification

    H5S: Dataspace Interface

    -
    - -

    Dataspace Object API Functions

    - -These functions create and manipulate the dataspace in which to store the -elements of a dataset. - - -

    -The C Interfaces: - - - -
    -
  • H5Screate -
  • H5Scopy -
  • H5Sclose -
  • H5Screate_simple -
  • H5Sis_simple -
  • H5Soffset_simple -
  • H5Sget_simple_extent_dims -
  • H5Sget_simple_extent_ndims - -
  •        -
  • H5Sget_simple_extent_npoints -
  • H5Sget_simple_extent_type - -
  • H5Sextent_copy -
  • H5Sset_extent_simple -
  • H5Sset_extent_none -
  • H5Sget_select_type -
  • H5Sget_select_npoints -
  • H5Sget_select_hyper_nblocks -
  • H5Sget_select_hyper_blocklist -
  •        -
  • H5Sget_select_elem_npoints -
  • H5Sget_select_elem_pointlist -
  • H5Sget_select_bounds -
  • H5Sselect_elements -
  • H5Sselect_all -
  • H5Sselect_none -
  • H5Sselect_valid -
  • H5Sselect_hyperslab - - - -
  • -
    - -Alphabetical Listing - - - - - - - - - - - - - -
    -
  • H5Sclose -
  • H5Scopy -
  • H5Screate -
  • H5Screate_simple -
  • H5Sextent_copy -
  • H5Sget_select_bounds -
  • H5Sget_select_elem_npoints -
  • H5Sget_select_elem_pointlist -
  • H5Sget_select_hyper_blocklist -
  •        -
  • H5Sget_select_hyper_nblocks -
  • H5Sget_select_npoints -
  • H5Sget_select_type -
  • H5Sget_simple_extent_dims -
  • H5Sget_simple_extent_ndims -
  • H5Sget_simple_extent_npoints -
  • H5Sget_simple_extent_type -
  • H5Sis_simple -
  • H5Soffset_simple -
  •        - -
  • H5Sselect_all -
  • H5Sselect_elements -
  • H5Sselect_hyperslab -
  • H5Sselect_none -
  • H5Sselect_valid -
  • H5Sset_extent_none -
  • H5Sset_extent_simple -
  • - -
    -The FORTRAN90 Interfaces: -
    -In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
    - - - -
    -
  • h5screate_f -
  • h5scopy_f -
  • h5sclose_f -
  • h5screate_simple_f -
  • h5sis_simple_f -
  • h5soffset_simple_f -
  • h5sget_simple_extent_dims_f -
  • h5sget_simple_extent_ndims_f -
  •        -
  • h5sget_simple_extent_npoints_f -
  • h5sget_simple_extent_type_f - -
  • h5sextent_copy_f -
  • h5sset_extent_simple_f -
  • h5sset_extent_none_f -
  • h5sget_select_type_f -
  • h5sget_select_npoints_f -
  • h5sget_select_hyper_nblocks_f -
  • h5sget_select_hyper_blocklist_f - -
  •        -
  • h5sget_select_elem_npoints_f -
  • h5sget_select_elem_pointlist_f -
  • h5sselect_elements_f -
  • h5sselect_all_f -
  • h5sselect_none_f -
  • h5sselect_valid_f -
  • h5sselect_hyperslab_f - - - -
  • - - - - - - - - -


    -
    -
    Name: H5Sclose -
    Signature: -
    herr_t H5Sclose(hid_t space_id - ) -
    Purpose: -
    Releases and terminates access to a dataspace. -
    Description: -
    H5Sclose releases a dataspace. - Further access through the dataspace identifier is illegal. - Failure to release a dataspace with this call will - result in resource leaks. -
    Parameters: -
      - - - -
      hid_t space_id    Identifier of dataspace to release.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sclose_f -
    -
    -SUBROUTINE h5sclose_f(space_id, hdferr)     
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id  ! Dataspace identifier
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5sclose_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Scopy -
    Signature: -
    hid_t H5Scopy(hid_t space_id - ) -
    Purpose: -
    Creates an exact copy of a dataspace. -
    Description: -
    H5Scopy creates a new dataspace which is an exact - copy of the dataspace identified by space_id. - The dataspace identifier returned from this function should be - released with H5Sclose or resource leaks will occur. -
    Parameters: -
      - - - -
      hid_t space_id    Identifier of dataspace to copy.
    -
    Returns: -
    Returns a dataspace identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5scopy_f -
    -
    -SUBROUTINE h5scopy_f(space_id, new_space_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id      ! Dataspace identifier 
    -  INTEGER(HID_T), INTENT(OUT) :: new_space_id ! Identifier of dataspace copy 
    -  INTEGER, INTENT(OUT) :: hdferr              ! Error code
    -                                              ! 0 on success and -1 on failure
    -END SUBROUTINE h5scopy_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Screate -
    Signature: -
    hid_t H5Screate(H5S_class_t type) -
    Purpose: -
    Creates a new dataspace of a specified type. -
    Description: -
    H5Screate creates a new dataspace of a particular - type. - The types currently supported are H5S_SCALAR and - H5S_SIMPLE; - others are planned to be added later. -
    Parameters: -
      - - - -
      H5S_class_t type    The type of dataspace to be created.
    -
    Returns: -
    Returns a dataspace identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5screate_f -
    -
    -SUBROUTINE h5screate_f(classtype, space_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER, INTENT(IN) :: classtype        ! The type of the dataspace
    -                                          ! to be created. Possible values
    -                                          ! are: 
    -                                          !    H5S_SCALAR_F 
    -                                          !    H5S_SIMPLE_F 
    -  INTEGER(HID_T), INTENT(OUT) :: space_id ! Dataspace identifier 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5screate_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Screate_simple -
    Signature: -
    hid_t H5Screate_simple(int rank, - const hsize_t * dims, - const hsize_t * maxdims - ) -
    Purpose: -
    Creates a new simple dataspace and opens it for access. -
    Description: - -
    H5Screate_simple creates a new simple dataspace - and opens it for access. -

    - rank is the number of dimensions used in the dataspace. -

    - dims is an array specifying the size of each dimension - of the dataset while - maxdims is an array specifying the upper limit on - the size of each dimension. - maxdims may be the null pointer, in which case the - upper limit is the same as dims. -

    - If an element of maxdims is H5S_UNLIMITED (-1), the maximum size of the corresponding dimension is unlimited. Otherwise, no element of maxdims should be smaller than the corresponding element of dims. -

    - The dataspace identifier returned from this function must be - released with H5Sclose or resource leaks will occur. -

    Parameters: -
      - - - - - - - - - -
  int rank    Number of dimensions of dataspace.
  const hsize_t * dims    An array of the size of each dimension.
  const hsize_t * maxdims    An array of the maximum size of each dimension.
    -
    Returns: -
    Returns a dataspace identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5screate_simple_f -
    -
    -SUBROUTINE h5screate_simple_f(rank, dims, space_id, hdferr, maxdims) 
    -  IMPLICIT NONE
    -  INTEGER, INTENT(IN) :: rank             ! Number of dataspace dimensions 
    -  INTEGER(HSIZE_T), INTENT(IN) :: dims(*) ! Array with the dimension sizes 
    -  INTEGER(HID_T), INTENT(OUT) :: space_id ! Dataspace identifier 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -  INTEGER(HSIZE_T), OPTIONAL, INTENT(IN) :: maxdims(*) 
    -                                          ! Array with the maximum 
    -                                          ! dimension sizes 
    -END SUBROUTINE h5screate_simple_f
    -	
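A minimal C sketch creating a 4 x 6 dataspace whose first dimension may grow without limit; the sizes are illustrative:

    #include "hdf5.h"

    hid_t make_extendible_space(void)
    {
        hsize_t dims[2]    = {4, 6};
        hsize_t maxdims[2] = {H5S_UNLIMITED, 6};

        /* Negative on failure; release with H5Sclose when no longer needed. */
        hid_t space_id = H5Screate_simple(2, dims, maxdims);
        return space_id;
    }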
    - - -
    - - - -
    -
    -
    Name: H5Sextent_copy -
    Signature: -
    herr_t H5Sextent_copy(hid_t dest_space_id, - hid_t source_space_id - ) -
    Purpose: -
    Copies the extent of a dataspace. -
    Description: -
    H5Sextent_copy copies the extent from - source_space_id to dest_space_id. - This action may change the type of the dataspace. -
    Parameters: -
      - - - - - - -
  hid_t dest_space_id    IN: The identifier for the dataspace to which the extent is copied.
  hid_t source_space_id    IN: The identifier for the dataspace from which the extent is copied.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sextent_copy_f -
    -
    -SUBROUTINE h5sextent_copy_f(dest_space_id, source_space_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: dest_space_id   ! Identifier of destination
    -                                                ! dataspace
    -  INTEGER(HID_T), INTENT(IN) :: source_space_id ! Identifier of source 
    -                                                ! dataspace
    -  INTEGER, INTENT(OUT) :: hdferr                ! Error code
    -                                                ! 0 on success and -1 on failure 
    -END SUBROUTINE h5sextent_copy_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_select_bounds -
    Signature: -
    herr_t H5Sget_select_bounds(hid_t space_id, - hsize_t *start, - hsize_t *end - ) -
    Purpose: -
    Gets the bounding box containing the current selection. -
    Description: -
    H5Sget_select_bounds retrieves the coordinates of - the bounding box containing the current selection and places - them into user-supplied buffers. -

    - The start and end buffers must be large - enough to hold the dataspace rank number of coordinates. -

    - The bounding box exactly contains the selection. - I.e., if a 2-dimensional element selection is currently - defined as containing the points (4,5), (6,8), and (10,7), - then the bounding box will be (4, 5), (10, 8). -

    - The bounding box calculation includes the current offset of the - selection within the dataspace extent. -

    - Calling this function on a dataspace in which no elements are selected (a none selection) will fail, returning a negative value (FAIL). -

    Parameters: -
      - - - - - - - - - -
      hid_t space_id    IN: Identifier of dataspace to query.
  hsize_t *start    OUT: Starting coordinates of the bounding box.
  hsize_t *end    OUT: Ending coordinates of the bounding box, i.e., the coordinates of the diagonally opposite corner.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    -
    -SUBROUTINE  h5sget_select_bounds_f(space_id, start, end, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id 
    -                                   ! Dataspace identifier 
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(OUT) :: start
    -                                   ! Starting coordinates of the bounding box 
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(OUT) :: end
    -                                   ! Ending coordinates of the bounding box,
    -                                   ! i.e., the coordinates of the diagonally 
    -                                   ! opposite corner 
    -  INTEGER, INTENT(OUT) :: hdferr   ! Error code
    -END SUBROUTINE h5sget_select_bounds_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_select_elem_npoints -
    Signature: -
    hssize_t H5Sget_select_elem_npoints(hid_t space_id - ) -
    Purpose: -
    Gets the number of element points in the current selection. -
    Description: -
    H5Sget_select_elem_npoints returns - the number of element points in the current dataspace selection. -
    Parameters: -
      - - - -
      hid_t space_id    IN: Identifier of dataspace to query.
    -
    Returns: -
    Returns the number of element points in the current dataspace selection if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5sget_select_elem_npoints_f -
    -
    -SUBROUTINE h5sget_select_elem_npoints_f(space_id, num_points, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id ! Dataspace identifier
    -  INTEGER, INTENT(OUT) :: num_points     ! Number of points in 
    -                                         ! the current elements selection
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5sget_select_elem_npoints_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_select_elem_pointlist -
    Signature: -
    herr_t H5Sget_select_elem_pointlist(hid_t space_id, - hsize_t startpoint, - hsize_t numpoints, - hsize_t *buf - ) -
    Purpose: -
    Gets the list of element points currently selected. -
    Description: -
    H5Sget_select_elem_pointlist returns the list of - element points in the current dataspace selection. Starting with - the startpoint-th point in the list of points, - numpoints points are put into the user's buffer. - If the user's buffer fills up before numpoints - points are inserted, the buffer will contain only as many - points as fit. -

    - The element point coordinates have the same dimensionality (rank) - as the dataspace they are located within. The list of element points - is formatted as follows: -
         - <coordinate>, followed by -
         - the next coordinate, -
         - etc. -
    - until all of the selected element points have been listed. -

    - The points are returned in the order they will be iterated through - when the selection is read/written from/to disk. -

    Parameters: -
      - - - - - - - - - - - - -
  hid_t space_id    IN: Dataspace identifier of selection to query.
  hsize_t startpoint    IN: Element point to start with.
  hsize_t numpoints    IN: Number of element points to get.
  hsize_t *buf    OUT: List of element points selected.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sget_select_elem_pointlist_f -
    -
    -SUBROUTINE h5sget_select_elem_pointlist_f(space_id, startpoint, num_points,
    -                                          buf, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)   :: space_id   ! Dataspace identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: startpoint ! Element point to start with
    -  INTEGER, INTENT(OUT) :: num_points         ! Number of points to get in 
    -                                             ! the current element selection
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(OUT) :: buf
    -                                             ! List of points selected 
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -END SUBROUTINE h5sget_select_elem_pointlist_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_select_hyper_blocklist -
    Signature: -
    herr_t H5Sget_select_hyper_blocklist(hid_t space_id, - hsize_t startblock, - hsize_t numblocks, - hsize_t *buf - ) -
    Purpose: -
    Gets the list of hyperslab blocks currently selected. -
    Description: -
    H5Sget_select_hyper_blocklist returns a list of - the hyperslab blocks currently selected. Starting with the - startblock-th block in the list of blocks, - numblocks blocks are put into the user's buffer. - If the user's buffer fills up before numblocks - blocks are inserted, the buffer will contain only as many - blocks as fit. -

    - The block coordinates have the same dimensionality (rank) - as the dataspace they are located within. The list of blocks - is formatted as follows: -
         - <"start" coordinate>, immediately followed by -
         - <"opposite" corner coordinate>, followed by -
         - the next "start" and "opposite" coordinates, -
         - etc. -
    - until all of the selected blocks have been listed. -

    - No guarantee is implied as to the order in which blocks are listed. -

    Parameters: -
      - - - - - - - - - - - - -
  hid_t space_id    IN: Dataspace identifier of selection to query.
  hsize_t startblock    IN: Hyperslab block to start with.
  hsize_t numblocks    IN: Number of hyperslab blocks to get.
  hsize_t *buf    OUT: List of hyperslab blocks selected.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sget_select_hyper_blocklist_f -
    -
    -SUBROUTINE h5sget_select_hyper_blocklist_f(space_id, startblock, num_blocks,
    -                                           buf, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN)   :: space_id   ! Dataspace identifier
    -  INTEGER(HSIZE_T), INTENT(IN) :: startblock ! Hyperslab block to start with
    -  INTEGER, INTENT(OUT) :: num_blocks         ! Number of hyperslab blocks to 
    -                                             ! get in the current hyperslab 
    -                                             ! selection
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(OUT) :: buf
    -                                             ! List of hyperslab blocks selected
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -END SUBROUTINE h5sget_select_hyper_blocklist_f
    -	
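A C sketch that retrieves and prints every block of a 2-dimensional hyperslab selection, sizing the buffer as 2 * rank coordinates per block as described above; space_id is an illustrative dataspace assumed to carry a hyperslab selection:

    #include <stdio.h>
    #include <stdlib.h>
    #include "hdf5.h"

    void print_hyper_blocks(hid_t space_id)
    {
        int      rank    = H5Sget_simple_extent_ndims(space_id);
        hssize_t nblocks = H5Sget_select_hyper_nblocks(space_id);
        if (rank != 2 || nblocks < 0)
            return;

        /* Each block is described by a "start" corner and an "opposite"
         * corner, i.e. 2 * rank coordinates per block. */
        hsize_t *buf = malloc((size_t)nblocks * 2 * (size_t)rank * sizeof(hsize_t));
        if (!buf)
            return;

        if (H5Sget_select_hyper_blocklist(space_id, 0, (hsize_t)nblocks, buf) >= 0) {
            for (hssize_t b = 0; b < nblocks; b++) {
                hsize_t *start = buf + b * 2 * rank;
                hsize_t *end   = start + rank;
                printf("block %lld: start (%llu, %llu)  opposite (%llu, %llu)\n",
                       (long long)b,
                       (unsigned long long)start[0], (unsigned long long)start[1],
                       (unsigned long long)end[0],   (unsigned long long)end[1]);
            }
        }
        free(buf);
    }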
    - - -
    - - - -
    -
    -
    Name: H5Sget_select_hyper_nblocks -
    Signature: -
    hssize_t H5Sget_select_hyper_nblocks(hid_t space_id - ) -
    Purpose: -
    Get number of hyperslab blocks. -
    Description: -
    H5Sget_select_hyper_nblocks returns the - number of hyperslab blocks in the current dataspace selection. -
    Parameters: -
      - - - -
      hid_t space_id    IN: Identifier of dataspace to query.
    -
    Returns: -
    Returns the number of hyperslab blocks in - the current dataspace selection if successful. - Otherwise returns a negative value. -
    Fortran90 Interface: h5sget_select_hyper_nblocks_f -
    -
    -SUBROUTINE h5sget_select_hyper_nblocks_f(space_id, num_blocks, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id ! Dataspace identifier
    -  INTEGER, INTENT(OUT) :: num_blocks     ! Number of hyperslab blocks in 
    -                                         ! the current hyperslab selection
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5sget_select_hyper_nblocks_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_select_npoints -
    Signature: -
    hssize_t H5Sget_select_npoints(hid_t space_id) -
    Purpose: -
    Determines the number of elements in a dataspace selection. -
    Description: -
    H5Sget_select_npoints determines the number of elements - in the current selection of a dataspace. -
    Parameters: -
      - - - -
      hid_t space_id    Dataspace identifier.
    -
    Returns: -
    Returns the number of elements in the selection if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sget_select_npoints_f -
    -
    -SUBROUTINE h5sget_select_npoints_f(space_id, npoints, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id     ! Dataspace identifier 
    -  INTEGER(HSSIZE_T), INTENT(OUT) :: npoints  ! Number of elements in the
    -                                             ! selection 
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -                                             ! 0 on success and -1 on failure 
    -END SUBROUTINE h5sget_select_npoints_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_select_type -
    Signature: -
    H5S_sel_type H5Sget_select_type(hid_t space_id) -
    Purpose: -
    Determines the type of the dataspace selection. -
    Description: -
    H5Sget_select_type retrieves the - type of selection currently defined for the dataspace - space_id. -
    Parameters: -
      - - - -
      hid_t space_id    Dataspace identifier.
    -
    Returns: -
    Returns the dataspace selection type, a value of - the enumerated datatype H5S_sel_type, - if successful. - Valid return values are as follows: -
    - - -
    - H5S_SEL_NONE - - No selection is defined. -
    - H5S_SEL_POINTS - - A sequence of points is selected. -
    - H5S_SEL_HYPERSLABS - - A hyperslab or compound hyperslab is selected. -
    - H5S_SEL_ALL - - The entire dataset is selected. -
    -
    - Otherwise returns a negative value. -
    Fortran90 Interface: h5sget_select_type_f -
    -
    -SUBROUTINE h5sget_select_type_f(space_id, type, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id ! Dataspace identifier
    -  INTEGER, INTENT(OUT) :: type           ! Selection type
    -                                         ! Valid values are:
    -                                         !    H5S_SEL_ERROR_F 
    -                                         !    H5S_SEL_NONE_F 
    -                                         !    H5S_SEL_POINTS_F 
    -                                         !    H5S_SEL_HYPERSLABS_F 
    -                                         !    H5S_SEL_ALL_F 
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5sget_select_type_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_simple_extent_dims -
    Signature: -
    int H5Sget_simple_extent_dims(hid_t space_id, - hsize_t *dims, - hsize_t *maxdims - ) -
    Purpose: -
    Retrieves dataspace dimension size and maximum size. -
    Description: -
    H5Sget_simple_extent_dims returns the size and maximum sizes - of each dimension of a dataspace through the dims - and maxdims parameters. -

    - Either or both of dims and maxdims - may be NULL. -

    - If a value in the returned array maxdims is - H5S_UNLIMITED (-1), - the maximum size of that dimension is unlimited. -

    Parameters: -
      - - - - - - - - - -
  hid_t space_id    IN: Identifier of the dataspace object to query.
  hsize_t *dims    OUT: Pointer to array to store the size of each dimension.
  hsize_t *maxdims    OUT: Pointer to array to store the maximum size of each dimension.
    -
    Returns: -
    Returns the number of dimensions in the dataspace if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sget_simple_extent_dims_f -
    -
    -SUBROUTINE h5sget_simple_extent_dims_f(space_id, dims, maxdims, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id   ! Dataspace identifier 
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(OUT) :: dims 
    -                                           ! Array to store dimension sizes 
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(OUT) :: maxdims 
    -                                           ! Array to store max dimension sizes
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -                                           ! Dataspace rank on success 
    -                                           ! and -1 on failure
    -END SUBROUTINE h5sget_simple_extent_dims_f
    -	
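A small C sketch printing each dimension's current and maximum size; MAX_RANK here is an illustrative stand-in for the library's maximum dataspace rank:

    #include <stdio.h>
    #include "hdf5.h"

    #define MAX_RANK 32   /* illustrative; matches the library's maximum rank */

    void print_extent(hid_t space_id)
    {
        hsize_t dims[MAX_RANK];
        hsize_t maxdims[MAX_RANK];

        int rank = H5Sget_simple_extent_dims(space_id, dims, maxdims);
        for (int i = 0; i < rank; i++) {
            if (maxdims[i] == H5S_UNLIMITED)
                printf("dim %d: size %llu, unlimited maximum\n",
                       i, (unsigned long long)dims[i]);
            else
                printf("dim %d: size %llu, maximum %llu\n",
                       i, (unsigned long long)dims[i],
                       (unsigned long long)maxdims[i]);
        }
    }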
    - - -
    - - - -
    -
    -
    Name: H5Sget_simple_extent_ndims -
    Signature: -
    int H5Sget_simple_extent_ndims(hid_t space_id) -
    Purpose: -
    Determines the dimensionality of a dataspace. -
    Description: -
    H5Sget_simple_extent_ndims determines the dimensionality (or rank) - of a dataspace. -
    Parameters: -
      - - - -
      hid_t space_id    Identifier of the dataspace
    -
    Returns: -
    Returns the number of dimensions in the dataspace if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sget_simple_extent_ndims_f -
    -
    -SUBROUTINE h5sget_simple_extent_ndims_f(space_id, rank, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id   ! Dataspace identifier 
    -  INTEGER, INTENT(OUT) :: rank             ! Number of dimensions 
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -                                           ! 0 on success and -1 on failure
    -END SUBROUTINE h5sget_simple_extent_ndims_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_simple_extent_npoints -
    Signature: -
    hssize_t H5Sget_simple_extent_npoints(hid_t space_id) -
    Purpose: -
    Determines the number of elements in a dataspace. -
    Description: -
    H5Sget_simple_extent_npoints determines the number of elements - in a dataspace. For example, a simple 3-dimensional dataspace - with dimensions 2, 3, and 4 would have 24 elements. -
    Parameters: -
      - - - -
      hid_t space_id    ID of the dataspace object to query
    -
    Returns: -
    Returns the number of elements in the dataspace if successful; - otherwise returns 0. -
    Fortran90 Interface: h5sget_simple_extent_npoints_f -
    -
    -SUBROUTINE h5sget_simple_extent_npoints_f(space_id, npoints, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id    ! Dataspace identifier 
    -  INTEGER(HSIZE_T), INTENT(OUT) :: npoints  ! Number of elements in dataspace
    -  INTEGER, INTENT(OUT) :: hdferr            ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5sget_simple_extent_npoints_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sget_simple_extent_type -
    Signature: -
    H5S_class_t H5Sget_simple_extent_type(hid_t space_id) -
    Purpose: -
    Determine the current class of a dataspace. -
    Description: -
    H5Sget_simple_extent_type queries a dataspace to determine the - current class of a dataspace. -

    - The function returns a class name, one of the following: - H5S_SCALAR, - H5S_SIMPLE, or - H5S_NONE. -

    Parameters: -
      - - - -
      hid_t space_id    Dataspace identifier.
    -
    Returns: -
    Returns a dataspace class name if successful; - otherwise H5S_NO_CLASS (-1). -
    Fortran90 Interface: h5sget_simple_extent_type_f -
    -
    -SUBROUTINE h5sget_simple_extent_type_f(space_id, classtype, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id ! Dataspace identifier 
    -  INTEGER, INTENT(OUT) :: classtype      ! Class type 
    -                                         ! Possible values are: 
    -                                         !    H5S_NO_CLASS_F 
    -                                         !    H5S_SCALAR_F 
    -                                         !    H5S_SIMPLE_F 
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5sget_simple_extent_type_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sis_simple -
    Signature: -
    htri_t H5Sis_simple(hid_t space_id) -
    Purpose: -
    Determines whether a dataspace is a simple dataspace. -
    Description: -
    H5Sis_simple determines whether a dataspace is a simple dataspace. [Currently, all dataspace objects are simple dataspaces; complex dataspace support will be added in a future release.] -
    Parameters: -
      - - - -
      hid_t space_id    Identifier of the dataspace to query
    -
    Returns: -
    When successful, returns a positive value, for TRUE, - or 0 (zero), for FALSE. - Otherwise returns a negative value. -
    Fortran90 Interface: h5sis_simple_f -
    -
    -SUBROUTINE h5sis_simple_f(space_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id    ! Dataspace identifier 
    -  LOGICAL, INTENT(OUT) :: flag              ! Flag, indicates if dataspace
    -                                            ! is simple or not: 
    -                                            ! TRUE or FALSE  
    -  INTEGER, INTENT(OUT) :: hdferr            ! Error code
    -                                            ! 0 on success and -1 on failure 
    -END SUBROUTINE h5sis_simple_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Soffset_simple -
    Signature: -
    herr_t H5Soffset_simple(hid_t space_id, - const hssize_t *offset - ) -
    Purpose: -
    Sets the offset of a simple dataspace. -
    Description: -
    H5Soffset_simple sets the offset of a - simple dataspace space_id. The offset - array must be the same number of elements as the number of - dimensions for the dataspace. If the offset - array is set to NULL, the offset for the dataspace - is reset to 0. -

    - This function allows the same shaped selection to be moved - to different locations within a dataspace without requiring it - to be redefined. -

    Parameters: -
      - - - - - - -
  hid_t space_id    IN: The identifier for the dataspace object to reset.
  const hssize_t *offset    IN: The offset at which to position the selection.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5soffset_simple_f -
    -
    -SUBROUTINE h5soffset_simple_f(space_id, offset, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id    ! Dataspace identifier 
    -  INTEGER(HSSIZE_T), DIMENSION(*), INTENT(IN) ::  offset
    -                                            ! The offset at which to position
    -                                            ! the selection  
    -  INTEGER, INTENT(OUT) :: hdferr            ! Error code
    -                                            ! 0 on success and -1 on failure
    -END SUBROUTINE h5soffset_simple_f
    -	
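A brief C sketch shifting an existing selection by (2, 3) and then checking it with H5Sselect_valid (documented below); the offset values are illustrative:

    #include "hdf5.h"

    herr_t shift_selection(hid_t space_id)
    {
        hssize_t offset[2] = {2, 3};

        if (H5Soffset_simple(space_id, offset) < 0)
            return -1;

        /* Passing NULL instead of an array resets the offset to zero.
         * Verify the shifted selection still fits within the extent. */
        return (H5Sselect_valid(space_id) > 0) ? 0 : -1;
    }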
    - - -
    - - - -
    -
    -
    Name: H5Sselect_all -
    Signature: -
    herr_t H5Sselect_all(hid_t space_id) -
    Purpose: -
    Selects the entire dataspace. -
    Description: -
    H5Sselect_all selects the entire extent - of the dataspace space_id. -

    - More specifically, H5Sselect_all selects the special H5S_SELECT_ALL region for the dataspace space_id. H5S_SELECT_ALL selects the entire dataspace for any dataspace it is applied to. -

    Parameters: -
      - - - -
      hid_t space_id    IN: The identifier for the dataspace in which the - selection is being made.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sselect_all_f -
    -
    -SUBROUTINE h5sselect_all_f(space_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id  ! Dataspace identifier 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5sselect_all_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sselect_elements -
    Signature: -
    herr_t H5Sselect_elements(hid_t space_id, - H5S_seloper_t op, - const size_t num_elements, - const hsize_t *coord[ ] - ) -
    Purpose: -
    Selects array elements to be included in the selection for a dataspace. -
    Description: -
    H5Sselect_elements selects array elements to be - included in the selection for the space_id dataspace. -

    - The number of elements selected is set in the - num_elements parameter. -

    - The coord array is a two-dimensional array of size dataspace_rank by num_elements containing a list of zero-based values specifying the coordinates in the dataset of the selected elements. The order of the element coordinates in the coord array specifies the order in which the array elements are iterated through when I/O is performed. Duplicate coordinate locations are not checked for. -

    - The selection operator op determines how the - new selection is to be combined with the previously existing - selection for the dataspace. - The following operators are supported: -

    - - -
    - H5S_SELECT_SET - Replaces the existing selection with the parameters from this call. Overlapping blocks are not supported with this operator. -
    - H5S_SELECT_APPEND - - Adds the new selection following the last element of the - existing selection. -
    - H5S_SELECT_PREPEND   - - Adds the new selection preceding the first element of the - existing selection. -
    -
    - -
    Parameters: -
      - - - - - - - - - - - - -
  hid_t space_id    Identifier of the dataspace.
  H5S_seloper_t op    Operator specifying how the new selection is to be combined with the existing selection for the dataspace.
  const size_t num_elements    Number of elements to be selected.
  const hsize_t *coord[ ]    A 2-dimensional array of 0-based values specifying the coordinates of the elements being selected.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sselect_elements_f -
    -
    -SUBROUTINE h5sselect_elements_f(space_id, operator, num_elements,
    -                                coord, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id  ! Dataspace identifier 
    -  INTEGER, INTENT(IN) :: operator         ! Flag, valid values are:
    -                                          !    H5S_SELECT_SET_F 
    -                                          !    H5S_SELECT_OR_F 
    -  INTEGER, INTENT(IN) :: num_elements     ! Number of elements to be selected
    -  INTEGER(HSIZE_T), DIMENSION(*,*), INTENT(IN) :: coord 
    -                                          ! Array with the coordinates
    -                                          ! of the selected elements:
    -                                          ! coord(num_elements, rank)
    - -
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5sselect_elements_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sselect_hyperslab -
    Signature: -
    herr_t H5Sselect_hyperslab(hid_t space_id, - H5S_seloper_t op, - const hsize_t *start, - const hsize_t *stride, - const hsize_t *count, - const hsize_t *block - ) -
    Purpose: -
    Selects a hyperslab region to add to the current selected region. -
    Description: -
    H5Sselect_hyperslab selects a hyperslab region - to add to the current selected region for the dataspace - specified by space_id. -

    - The start, stride, count, - and block arrays must be the same size as the rank - of the dataspace. -

    - The selection operator op determines how the new - selection is to be combined with the already existing selection - for the dataspace. - The following operators are supported: -

    - - -
    - H5S_SELECT_SET - - Replaces the existing selection with the parameters from this call. - Overlapping blocks are not supported with this operator. -
    - H5S_SELECT_OR - - Adds the new selection to the existing selection. -    - (Binary OR) -
    - H5S_SELECT_AND - - Retains only the overlapping portions of the new selection and - the existing selection. -    - (Binary AND) -
    - H5S_SELECT_XOR - - Retains only the elements that are members of the new selection or - the existing selection, excluding elements that are members of - both selections. -    - (Binary exclusive-OR, XOR) -
    - H5S_SELECT_NOTB   - - Retains only elements of the existing selection that are not in - the new selection. -
    - H5S_SELECT_NOTA - - Retains only elements of the new selection that are not in - the existing selection. -
    -
    - -

    - The start array determines the starting coordinates - of the hyperslab to select. -

    - The stride array chooses array locations from the dataspace, with each value in the stride array determining how many elements to move in each dimension. Setting a value in the stride array to 1 moves to each element in that dimension of the dataspace; setting a value of 2 in a location in the stride array moves to every other element in that dimension of the dataspace. In other words, the stride determines the number of elements to move from the start location in each dimension. Stride values of 0 are not allowed. If the stride parameter is NULL, a contiguous hyperslab is selected (as if each value in the stride array were set to 1). -

    - The count array determines how many blocks to - select from the dataspace, in each dimension. -

    - The block array determines - the size of the element block selected from the dataspace. - If the block parameter is set to NULL, - the block size defaults to a single element in each dimension - (as if the block array were set to all - 1's). -

    - For example, in a 2-dimensional dataspace, setting - start to [1,1], - stride to [4,4], - count to [3,7], and - block to [2,2] - selects 21 2x2 blocks of array elements starting with - location (1,1) and selecting blocks at locations - (1,1), (5,1), (9,1), (1,5), (5,5), etc. -

    - Regions selected with this function call default to C order - iteration when I/O is performed. -

    Parameters: -
      - - - - - - - - - - - - - - - - - - -
  hid_t space_id    IN: Identifier of dataspace selection to modify.
  H5S_seloper_t op    IN: Operation to perform on current selection.
  const hsize_t *start    IN: Offset of start of hyperslab.
  const hsize_t *count    IN: Number of blocks included in hyperslab.
  const hsize_t *stride    IN: Hyperslab stride.
  const hsize_t *block    IN: Size of block in hyperslab.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sselect_hyperslab_f -
    -
    -SUBROUTINE h5sselect_hyperslab_f(space_id, operator, start, count,
    -                                 hdferr, stride, block) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id  ! Dataspace identifier 
    -  INTEGER, INTENT(IN) :: operator         ! Flag, valid values are:
    -                                          !    H5S_SELECT_SET_F
    -                                          !    H5S_SELECT_OR_F
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: start
    -                                          ! Starting coordinates of hyperslab 
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: count 
    -                                          ! Number of blocks to select 
    -                                          ! from dataspace 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -  INTEGER(HSIZE_T), DIMENSION(*), OPTIONAL, INTENT(IN) :: stride
    -                                          ! Array of how many elements to 
    -                                          ! move in each direction
    -  INTEGER(HSIZE_T), DIMENSION(*), OPTIONAL, INTENT(IN) :: block 
    -                                          ! Size of the element block 
    -END SUBROUTINE h5sselect_hyperslab_f
    -	
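A C sketch reproducing the 21-block example from the description above; the 20 x 40 extent is illustrative:

    #include "hdf5.h"

    /* In a 2-dimensional dataspace, select 21 2x2 blocks starting at (1,1)
     * with a stride of 4 in each dimension. */
    hid_t select_blocks(void)
    {
        hsize_t dims[2]   = {20, 40};
        hsize_t start[2]  = {1, 1};
        hsize_t stride[2] = {4, 4};
        hsize_t count[2]  = {3, 7};
        hsize_t block[2]  = {2, 2};

        hid_t space_id = H5Screate_simple(2, dims, NULL);
        if (space_id < 0)
            return -1;

        if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET,
                                start, stride, count, block) < 0) {
            H5Sclose(space_id);
            return -1;
        }
        return space_id;   /* 3 * 7 = 21 blocks of 2x2 elements are selected */
    }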
    - - -
    - - - -
    -
    -
    Name: H5Sselect_none -
    Signature: -
    herr_t H5Sselect_none(hid_t space_id) -
    Purpose: -
    Resets the selection region to include no elements. -
    Description: -
    H5Sselect_none resets the selection region - for the dataspace space_id to include no elements. -
    Parameters: -
      - - - -
      hid_t space_id    IN: The identifier for the dataspace in which the - selection is being reset.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sselect_none_f -
    -
    -SUBROUTINE h5sselect_none_f(space_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id  ! Dataspace identifier 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5sselect_none_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sselect_valid -
    Signature: -
    htri_t H5Sselect_valid(hid_t space_id) -
    Purpose: -
    Verifies that the selection is within the extent of the dataspace. -
    Description: -
    H5Sselect_valid verifies that the selection - for the dataspace space_id is within the extent - of the dataspace if the current offset for the dataspace is used. -
    Parameters: -
      - - - -
      hid_t space_id    The identifier for the dataspace being queried.
    -
    Returns: -
    Returns a positive value, for TRUE, - if the selection is contained within the extent - or 0 (zero), for FALSE, if it is not. - Returns a negative value on error conditions - such as the selection or extent not being defined. -
    Fortran90 Interface: h5sselect_valid_f -
    -
    -SUBROUTINE h5sselect_valid_f(space_id, flag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id  ! Dataspace identifier 
    -  LOGICAL, INTENT(OUT) :: flag            ! TRUE if the selection is
    -                                          ! contained within the extent,
    -                                          ! FALSE otherwise. 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5sselect_valid_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sset_extent_none -
    Signature: -
    herr_t H5Sset_extent_none(hid_t space_id) -
    Purpose: -
    Removes the extent from a dataspace. -
    Description: -
    H5Sset_extent_none removes the extent from - a dataspace and sets the type to H5S_NO_CLASS. -
    Parameters: -
      - - - -
      hid_t space_id    The identifier for the dataspace from which - the extent is to be removed.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sset_extent_none_f -
    -
    -SUBROUTINE h5sset_extent_none_f(space_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id  ! Dataspace identifier 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5sset_extent_none_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Sset_extent_simple -
    Signature: -
    herr_t H5Sset_extent_simple(hid_t space_id, - int rank, - const hsize_t *current_size, - const hsize_t *maximum_size - ) -
    Purpose: -
    Sets or resets the size of an existing dataspace. -
    Description: -
    H5Sset_extent_simple sets or resets the size of - an existing dataspace. -

    - rank is the dimensionality, or number of - dimensions, of the dataspace. -

    - current_size is an array of size rank - which contains the new size of each dimension in the dataspace. - maximum_size is an array of size rank - which contains the maximum size of each dimension in the - dataspace. -

    - Any previous extent is removed from the dataspace, the dataspace - type is set to H5S_SIMPLE, and the extent is set as - specified. -

    Parameters: -
      - - - - - - - - - - - - -
  hid_t space_id    Dataspace identifier.
  int rank    Rank, or dimensionality, of the dataspace.
  const hsize_t *current_size    Array containing current size of dataspace.
  const hsize_t *maximum_size    Array containing maximum size of dataspace.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5sset_extent_simple_f -
    -
    -SUBROUTINE h5sset_extent_simple_f(space_id, rank, current_size,
    -                                  maximum_size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: space_id     ! Dataspace identifier 
    -  INTEGER, INTENT(IN) :: rank                ! Dataspace rank 
    -  INTEGER(HSIZE_T), DIMENSION(rank), INTENT(IN) :: current_size 
    -                                             ! Array with the new sizes
    -                                             ! of dimensions 
    -  INTEGER(HSIZE_T), DIMENSION(rank), INTENT(IN) :: maximum_size 
    -                                             ! Array with the new maximum
    -                                             ! sizes of dimensions 
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -                                             ! 0 on success and -1 on failure
    -END SUBROUTINE h5sset_extent_simple_f
    -	
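A minimal C sketch resetting an existing dataspace to a 3 x 4 extent with a 10 x 4 maximum; the sizes are illustrative:

    #include "hdf5.h"

    herr_t resize_space(hid_t space_id)
    {
        hsize_t current_size[2] = {3, 4};
        hsize_t maximum_size[2] = {10, 4};

        /* Any previous extent is removed and the dataspace becomes H5S_SIMPLE. */
        return H5Sset_extent_simple(space_id, 2, current_size, maximum_size);
    }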
    - - -
    - - - - - - - - - - - - - - - - - -
    -
diff --git a/doc/html/RM_H5T.html b/doc/html/RM_H5T.html
deleted file mode 100644
index 2323aae..0000000
--- a/doc/html/RM_H5T.html
+++ /dev/null
@@ -1,4001 +0,0 @@
-HDF5/H5T API Specification
    -

    H5T: Datatype Interface

    -
    - -

    Datatype Object API Functions

    - -These functions create and manipulate the datatype which describes elements -of a dataset. - - -

    -The C Interfaces: - - - -
    -General Datatype Operations -
  • H5Tcreate -
  • H5Topen -
  • H5Tcommit -
  • H5Tcommitted -
  • H5Tcopy -
  • H5Tequal -
  • H5Tlock -
  • H5Tget_class -
  • H5Tget_size -
  • H5Tget_super -
  • H5Tget_native_type -
  • H5Tdetect_class -
  • H5Tclose -

    -Conversion Functions -
  • H5Tconvert -
  • H5Tfind -
  • H5Tset_overflow -
  • H5Tget_overflow -
  • H5Tregister -
  • H5Tunregister -
    -Atomic Datatype Properties -
    -
  • H5Tset_size -
  • H5Tget_order -
  • H5Tset_order -
  • H5Tget_precision -
  • H5Tset_precision -
  • H5Tget_offset -
  • H5Tset_offset -
  • H5Tget_pad -
  • H5Tset_pad -
  • H5Tget_sign -
  • H5Tset_sign -
  • H5Tget_fields -
  • H5Tset_fields -
  • H5Tget_ebias -
  • H5Tset_ebias -
  • H5Tget_norm -
  • H5Tset_norm -
  • H5Tget_inpad -
  • H5Tset_inpad -
  • H5Tget_cset -
  • H5Tset_cset -
  • H5Tget_strpad -
  • H5Tset_strpad -

    -Enumeration Datatypes -
  • H5Tenum_create -
  • H5Tenum_insert -
  • H5Tenum_nameof -
  • H5Tenum_valueof -
  • H5Tget_member_value -
  • H5Tget_nmembers -
  • H5Tget_member_name -
  • H5Tget_member_index -
    -Compound Datatype Properties -
  • H5Tget_nmembers -
  • H5Tget_member_class -
  • H5Tget_member_name -
  • H5Tget_member_index -
  • H5Tget_member_offset -
  • H5Tget_member_type -
  • H5Tinsert -
  • H5Tpack -

    -Array Datatypes -
  • H5Tarray_create -
  • H5Tget_array_ndims -
  • H5Tget_array_dims -

    -Variable-length Datatypes -
  • H5Tvlen_create -
  • H5Tis_variable_str -

    -Opaque Datatypes -
  • H5Tset_tag -
  • H5Tget_tag -
    - - -Alphabetical Listing - - - - - - - - - - - - -
    -
  • H5Tarray_create -
  • H5Tclose -
  • H5Tcommit -
  • H5Tcommitted -
  • H5Tconvert -
  • H5Tcopy -
  • H5Tcreate -
  • H5Tdetect_class -
  • H5Tenum_create -
  • H5Tenum_insert -
  • H5Tenum_nameof -
  • H5Tenum_valueof -
  • H5Tequal -
  • H5Tfind -
  • H5Tget_array_dims -
  • H5Tget_array_ndims -
  • H5Tget_class -
  • H5Tget_cset -
  • H5Tget_ebias -
  • H5Tget_fields -
  • H5Tget_inpad -
  • H5Tget_member_class -
  • H5Tget_member_index -
  • H5Tget_member_name -
  • H5Tget_member_offset -
  • H5Tget_member_type -
  • H5Tget_member_value -
  • H5Tget_native_type -
  • H5Tget_nmembers -
  • H5Tget_norm -
  • H5Tget_offset -
  • H5Tget_order -
  • H5Tget_overflow -
  • H5Tget_pad -
  • H5Tget_precision -
  • H5Tget_sign -
  • H5Tget_size -
  • H5Tget_strpad -
  • H5Tget_super -
  • H5Tget_tag -
  • H5Tinsert -
  • H5Tis_variable_str -
  • H5Tlock -
  • H5Topen -
  • H5Tpack -
  • H5Tregister -
  • H5Tset_cset -
  • H5Tset_ebias -
  • H5Tset_fields -
  • H5Tset_inpad -
  • H5Tset_norm -
  • H5Tset_offset -
  • H5Tset_order -
  • H5Tset_overflow -
  • H5Tset_pad -
  • H5Tset_precision -
  • H5Tset_sign -
  • H5Tset_size -
  • H5Tset_strpad -
  • H5Tset_tag -
  • H5Tunregister -
  • H5Tvlen_create -
    -The FORTRAN90 Interfaces: -
    -In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
    - - - -
    -General Datatype Operations -
  • h5tcreate_f -
  • h5topen_f -
  • h5tcommit_f -
  • h5tcommitted_f -
  • h5tcopy_f -
  • h5tequal_f - -
  • h5tget_class_f -
  • h5tget_size_f -
  • h5tget_super_f - - -
  • h5tclose_f - - - - - - - - -

    - Enumeration Datatypes -
  • h5tenum_create_f -
  • h5tenum_insert_f -
  • h5tenum_nameof_f -
  • h5tenum_valueof_f -
  • h5tget_member_value_f -
  • h5tget_nmembers_f -
  • h5tget_member_name_f -
  • h5tget_member_index_f -
    -Atomic Datatype Properties -
  • h5tset_size_f -
  • h5tget_order_f -
  • h5tset_order_f -
  • h5tget_precision_f -
  • h5tset_precision_f -
  • h5tget_offset_f -
  • h5tset_offset_f -
  • h5tget_pad_f -
  • h5tset_pad_f -
  • h5tget_sign_f -
  • h5tset_sign_f -
  • h5tget_fields_f -
  • h5tset_fields_f -
  • h5tget_ebias_f -
  • h5tset_ebias_f -
  • h5tget_norm_f -
  • h5tset_norm_f -
  • h5tget_inpad_f -
  • h5tset_inpad_f -
  • h5tget_cset_f -
  • h5tset_cset_f -
  • h5tget_strpad_f -
  • h5tset_strpad_f - - - -
    - Array Datatypes -
  • h5tarray_create_f -
  • h5tget_array_ndims_f -
  • h5tget_array_dims_f -

    - Compound Datatype Properties -
  • h5tget_nmembers_f -
  • h5tget_member_class_f -
  • h5tget_member_name_f -
  • h5tget_member_index_f -
  • h5tget_member_offset_f - -
  • h5tget_member_type_f -
  • h5tinsert_f -
  • h5tpack_f - -

    - Variable-length Datatypes -
  • h5tvlen_create_f -
  • h5tis_variable_str_f -

    - Opaque Datatypes -
  • h5tset_tag_f -
  • h5tget_tag_f -

    -The Datatype interface, H5T, provides a mechanism to describe the storage format of individual data points of a dataset and is designed so that new features can be added later without disrupting applications that use the datatype interface. A dataset (the H5D interface) is composed of a collection of raw data points of homogeneous type, organized according to the dataspace (the H5S interface). - -

    -A datatype is a collection of datatype properties, all of - which can be stored on disk, and which when taken as a whole, - provide complete information for data conversion to or from that - datatype. The interface provides functions to set and query - properties of a datatype. - -

    -A data point is an instance of a datatype, - which is an instance of a type class. We have defined - a set of type classes and properties which can be extended at a - later time. The atomic type classes are those which describe - types which cannot be decomposed at the datatype interface - level; all other classes are compound. - -

    -See The Datatype Interface (H5T) -in the HDF5 User's Guide for further information, including a complete list of all supported datatypes. - - - - - -


    -
    -
    Name: H5Tarray_create -
    Signature: -
    hid_t H5Tarray_create( - hid_t base, - int rank, - const hsize_t dims[/*rank*/], - const int perm[/*rank*/] - ) -
    Purpose: -
    Creates an array datatype object. -
    Description: -
    H5Tarray_create creates a new array datatype object. -

    - base is the datatype of every element of the array, - i.e., of the number at each position in the array. -

    - rank is the number of dimensions and the - size of each dimension is specified in the array dims. - The value of rank is currently limited to - H5S_MAX_RANK and must be greater than 0 - (zero). - All dimension sizes specified in dims must be greater - than 0 (zero). -

    - The array perm is designed to contain the dimension - permutation, i.e. C versus FORTRAN array order. -   - (The parameter perm is currently unused and is not yet implemented.) - -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t baseIN: Datatype identifier for the array base datatype.
      int rankIN: Rank of the array.
      const hsize_t dims[/*rank*/]    IN: Size of each array dimension.
      const int perm[/*rank*/]IN: Dimension permutation. -   - (Currently not implemented.)
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tarray_create_f -
    -
    -SUBROUTINE h5tarray_create_f(base_id, rank, dims, type_id, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: base_id   ! Identifier of array base datatype
    -  INTEGER, INTENT(IN)        ::  rank     ! Rank of the array
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: dims 
    -                                          ! Sizes of each array dimension
    -  INTEGER(HID_T), INTENT(OUT) :: type_id  ! Identifier of the array datatype
    -  INTEGER, INTENT(OUT)        :: hdferr   ! Error code
    -END SUBROUTINE h5tarray_create_f
    -	
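    Example: An illustrative C sketch (the 5x7 element dimensions are arbitrary) creating a two-dimensional array datatype of native integers; since perm is not yet implemented, NULL is passed for it:

        hsize_t adims[2] = {5, 7};     /* each array element is a 5x7 block of ints */
        hid_t   atype;

        atype = H5Tarray_create(H5T_NATIVE_INT, 2, adims, NULL);
        if (atype < 0) {
            /* handle error */
        }
        /* ... use atype as a dataset or compound-field datatype ... */
        H5Tclose(atype);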
    - - -
    - - - -
    -
    -
    Name: H5Tclose -
    Signature: -
    herr_t H5Tclose(hid_t type_id - ) -
    Purpose: -
    Releases a datatype. -
    Description: -
    H5Tclose releases a datatype. Further access - through the datatype identifier is illegal. Failure to release - a datatype with this call will result in resource leaks. -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to release.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tclose_f -
    -
    -SUBROUTINE h5tclose_f(type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5tclose_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tcommit -
    Signature: -
    herr_t H5Tcommit(hid_t loc_id, - const char * name, - hid_t type - ) -
    Purpose: -
    Commits a transient datatype to a file, creating a new named datatype. -
    Description: -
    H5Tcommit commits a transient datatype (one that is not immutable) to a file, turning it into a named datatype. The loc_id is either a file or group identifier which, when combined with name, refers to a new named datatype. -
    Parameters: -
      - - - - - - - - - -
      hid_t loc_idIN: A file or group identifier.
      const char * name    IN: A datatype name.
      hid_t typeIN: A datatype identifier.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tcommit_f -
    -
    -SUBROUTINE h5tcommit_f(loc_id, name, type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: loc_id  ! File or group identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Datatype name within file or group
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5tcommit_f
    -	
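    Example: A C sketch of committing a copied integer datatype so that other objects in the same file can share it; file_id and the name "shared_int" are hypothetical:

        hid_t dtype = H5Tcopy(H5T_NATIVE_INT);   /* transient datatype */

        if (H5Tcommit(file_id, "shared_int", dtype) < 0) {
            /* handle error */
        }
        /* dtype is now a named datatype stored in the file */
        H5Tclose(dtype);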
    - - -
    - - - -
    -
    -
    Name: H5Tcommitted -
    Signature: -
    htri_t H5Tcommitted(hid_t type) -
    Purpose: -
    Determines whether a datatype is a named type or a transient type. -
    Description: -
    H5Tcommitted queries a type to determine whether - the type specified by the type identifier - is a named type or a transient type. If this function returns - a positive value, then the type is named (that is, it has been - committed, perhaps by some other application). Datasets which - return committed datatypes with H5Dget_type() are - able to share the datatype with other datasets in the same file. -
    Parameters: -
      - - - -
      hid_t type    IN: Datatype identifier.
    -
    Returns: -
    When successful, returns a positive value, for TRUE, - if the datatype has been committed, or 0 (zero), - for FALSE, if the datatype has not been committed. - Otherwise returns a negative value. -
    Fortran90 Interface: h5tcommitted_f -
    -
    -SUBROUTINE h5tcommitted_f(type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tcommitted_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tconvert -
    Signature: -
    herr_t H5Tconvert(hid_t src_id, - hid_t dst_id, - size_t nelmts, - void *buf, - void *background, - hid_t plist_id - ) -
    Purpose: -
    Converts data from one specified datatype to another. -
    Description: -
    H5Tconvert converts nelmts elements - from the type specified by the src_id identifier - to type dst_id. - The source elements are packed in buf and on return - the destination will be packed in buf. - That is, the conversion is performed in place. - The optional background buffer is an array of nelmts - values of destination type which are merged with the converted - values to fill in cracks (for instance, background - might be an array of structs with the a and - b fields already initialized and the conversion - of buf supplies the c and d - field values). -

    - The parameter plist_id contains the dataset transfer - property list identifier which is passed to the conversion functions. - As of Release 1.2, this parameter is only used to pass along the - variable-length datatype custom allocation information. -

    Parameters: -
      - - - - - - - - - - - - - - - - - - -
      hid_t src_idIdentifier for the source datatype.
      hid_t dst_idIdentifier for the destination datatype.
      size_t nelmtsSize of array buf.
      void *bufArray containing pre- and post-conversion values.
      void *background    Optional background buffer.
      hid_t plist_idDataset transfer property list identifier.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
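    Example: An illustrative sketch of an in-place conversion from native int to native double; because the conversion happens in buf, the buffer is allocated large enough to hold nelmts values of the larger (destination) type, with the source values packed at its start:

        #include <stdlib.h>

        size_t  nelmts = 100;
        size_t  i;
        double *buf = malloc(nelmts * sizeof(double));   /* room for either type */

        for (i = 0; i < nelmts; i++)
            ((int *)buf)[i] = (int)i;                    /* packed source values */

        if (H5Tconvert(H5T_NATIVE_INT, H5T_NATIVE_DOUBLE, nelmts,
                       buf, NULL, H5P_DEFAULT) < 0) {
            /* handle error */
        }
        /* buf now holds nelmts doubles */
        free(buf);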
    - - - -
    -
    -
    Name: H5Tcopy -
    Signature: -
    hid_t H5Tcopy(hid_t type_id) -
    Purpose: -
    Copies an existing datatype. -
    Description: -
    H5Tcopy copies an existing datatype. - The returned type is always transient and unlocked. -

    - The type_id argument can be either a datatype - identifier, a predefined datatype (defined in - H5Tpublic.h), or a dataset identifier. - If type_id is a dataset identifier instead of a - datatype identifier, then this function returns a transient, - modifiable datatype which is a copy of the dataset's datatype. -

    - The datatype identifier returned should be released with - H5Tclose or resource leaks will occur. - -

    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to copy. Can be a datatype - identifier, a predefined datatype (defined in - H5Tpublic.h), or a dataset identifier.
    -
    Returns: -
    Returns a datatype identifier if successful; - otherwise returns a negative value -
    Fortran90 Interface: h5tcopy_f -
    -
    -SUBROUTINE h5tcopy_f(type_id, new_type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id      ! Datatype identifier 
    -  INTEGER(HID_T), INTENT(OUT) :: new_type_id ! Identifier of datatype's copy 
    -  INTEGER, INTENT(OUT) :: hdferr             ! Error code
    -                                             ! 0 on success and -1 on failure
    -END SUBROUTINE h5tcopy_f
    -	
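    Example: A common pattern (a sketch, not from the original entry): because predefined datatypes are locked, copy H5T_C_S1 and resize the copy to obtain a 16-byte fixed-length string type:

        hid_t strtype = H5Tcopy(H5T_C_S1);   /* transient, modifiable copy */

        H5Tset_size(strtype, 16);            /* 16-byte fixed-length string */
        /* ... use strtype when creating a dataset or attribute ... */
        H5Tclose(strtype);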
    - - -
    - - - -
    -
    -
    Name: H5Tcreate -
    Signature: -
    hid_t H5Tcreate(H5T_class_t class, - size_t size - ) -
    Purpose: -
    Creates a new datatype. -
    Description: -
    H5Tcreate creates a new datatype of the specified - class with the specified number of bytes. -

    - The following datatype classes are supported with this function: -

      -
    • H5T_COMPOUND -
    • H5T_OPAQUE -
    • H5T_ENUM -
    -

    - Use H5Tcopy to create integer or floating-point datatypes. -

    - The datatype identifier returned from this function should be - released with H5Tclose or resource leaks will result. -

    Parameters: -
      - - - - - - -
      H5T_class_t class    Class of datatype to create.
      size_t sizeThe number of bytes in the datatype to create.
    -
    Returns: -
    Returns datatype identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tcreate_f -
    -
    -SUBROUTINE h5tcreate_f(class, size, type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER, INTENT(IN) :: class             ! Datatype class can be one of
    -                                           !    H5T_COMPOUND_F (6)
    -                                           !    H5T_ENUM_F     (8)
    -                                           !    H5T_OPAQUE_F   (9)
    -  INTEGER(SIZE_T), INTENT(IN) :: size      ! Size of the datatype
    -  INTEGER(HID_T), INTENT(OUT) :: type_id   ! Datatype identifier
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -END SUBROUTINE h5tcreate_f
    -	
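    Example: A sketch (the struct and field names are arbitrary) that pairs H5Tcreate with H5Tinsert to build a compound datatype matching a C struct:

        typedef struct {
            int    serial;
            double temperature;
        } sensor_t;

        hid_t ctype = H5Tcreate(H5T_COMPOUND, sizeof(sensor_t));

        H5Tinsert(ctype, "serial",      HOFFSET(sensor_t, serial),      H5T_NATIVE_INT);
        H5Tinsert(ctype, "temperature", HOFFSET(sensor_t, temperature), H5T_NATIVE_DOUBLE);
        /* ... use ctype to create or read a dataset ... */
        H5Tclose(ctype);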
    - - -
    - - - -
    -
    -
    Name: H5Tdetect_class -
    Signature: -
    htri_t H5Tdetect_class(hid_t dtype_id, - H5T_class_t dtype_class - ) -
    Purpose: -
    Determines whether a datatype contains any datatypes of the - given datatype class. -
    Description: -
    H5Tdetect_class determines whether the datatype - specified in dtype_id contains any datatypes of the - datatype class specified in dtype_class. -

    - This function is useful primarily in recursively examining - all the fields and/or base types - of compound, array, and variable-length datatypes. -

    - Valid class identifiers are as defined in - H5Tget_class. -

    Parameters: -
      - - - - - - -
      hid_t dtype_idDatatype identifier.
      H5T_class_t dtype_class    Datatype class.
    -
    Returns: -
    Returns TRUE or FALSE if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
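    Example: A brief sketch (dtype is assumed to be an open datatype identifier) that checks whether a datatype contains variable-length data anywhere within it:

        htri_t has_vlen = H5Tdetect_class(dtype, H5T_VLEN);

        if (has_vlen > 0) {
            /* datatype contains variable-length data somewhere in its structure */
        } else if (has_vlen < 0) {
            /* handle error */
        }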
    - - - -
    -
    -
    Name: H5Tenum_create -
    Signature: -
    hid_t H5Tenum_create(hid_t parent_id - ) -
    Purpose: -
    Creates a new enumeration datatype. -
    Description: -
    H5Tenum_create creates a new enumeration datatype - based on the specified base datatype, parent_id, - which must be an integer type. -
    Parameters: -
      - - - -
      hid_t parent_id    IN: Datatype identifier for the base datatype.
    -
    Returns: -
    Returns the datatype identifier for the new enumeration datatype if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tenum_create_f -
    -
    -SUBROUTINE h5tenum_create_f(parent_id, new_type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: parent_id     ! Datatype identifier for
    -                                              ! the  base datatype
    -  INTEGER(HID_T), INTENT(OUT) :: new_type_id  ! Datatype identifier for the
    -                                              ! new enumeration datatype    
    -  INTEGER, INTENT(OUT) :: hdferr              ! Error code
    -END SUBROUTINE h5tenum_create_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tenum_insert -
    Signature: -
    herr_t H5Tenum_insert(hid_t type, - const char *name, - void *value - ) -
    Purpose: -
    Inserts a new enumeration datatype member. -
    Description: -
    H5Tenum_insert inserts a - new enumeration datatype member into an enumeration datatype. -

    - type is the enumeration datatype, - name is the name of the new member, and - value points to the value of the new member. -

    - name and value must both - be unique within type. -

    - value points to data which is of the - datatype defined when the enumeration datatype was created. -

    Parameters: -
      - - - - - - - - - -
      hid_t typeIN: Datatype identifier for the enumeration datatype.
      const char *name    IN: Name of the new member.
      void *valueIN: Pointer to the value of the new member.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tenum_insert_f -
    -
    -SUBROUTINE h5tenum_insert_f(type_id,  name, value, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of the new member
    -  INTEGER, INTENT(IN) :: value          ! Value of the new member
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tenum_insert_f
    -	
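    Example: Taken together with H5Tenum_create above, a sketch (member names and values are arbitrary) of building a small enumeration datatype:

        hid_t etype = H5Tenum_create(H5T_NATIVE_INT);
        int   val;

        val = 0;  H5Tenum_insert(etype, "RED",   &val);
        val = 1;  H5Tenum_insert(etype, "GREEN", &val);
        val = 2;  H5Tenum_insert(etype, "BLUE",  &val);
        /* ... use etype for datasets of colors ... */
        H5Tclose(etype);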
    - - -
    - - - -
    -
    -
    Name: H5Tenum_nameof -
    Signature: -
    herr_t H5Tenum_nameof(hid_t type, - void *value, - char *name, - size_t size - ) -
    Purpose: -
    Returns the symbol name corresponding to a specified member of an enumeration datatype. -
    Description: -
    H5Tenum_nameof finds the symbol name that - corresponds to the specified value - of the enumeration datatype type. -

    - At most size characters of the symbol - name are copied into the name buffer. - If the entire symbol name and null terminator - do not fit in the name buffer, then as - many characters as possible are copied - (not null terminated) and the function fails. -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t typeIN: Enumeration datatype identifier.
      void *value,    IN: Value of the enumeration datatype.
      char *name,OUT: Buffer for output of the symbol name.
      size_t sizeIN: Anticipated size of the symbol name, in bytes (characters).
    -
    Returns: -
    Returns a non-negative value if successful. Otherwise returns a negative value and, if size allows it, the first character of name is set to the null character. -
    Fortran90 Interface: h5tenum_nameof_f -
    -
    -SUBROUTINE h5tenum_nameof_f(type_id,  name, namelen, value, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
    -  CHARACTER(LEN=*), INTENT(OUT) :: name  ! Name of the  enumeration datatype
    -  INTEGER(SIZE_T), INTENT(IN) :: namelen ! Length of the name
    -  INTEGER, INTENT(IN) :: value           ! Value of the  enumeration datatype
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5tenum_nameof_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tenum_valueof -
    Signature: -
    herr_t H5Tenum_valueof(hid_t type, - char *name, - void *value - ) -
    Purpose: -
    Returns the value corresponding to a specified member of an enumeration datatype. -
    Description: -
    H5Tenum_valueof finds the value that - corresponds to the specified name - of the enumeration datatype type. -

    - The value argument should be at least - as large as the value of H5Tget_size(type) - in order to hold the result. -

    Parameters: -
      - - - - - - - - - -
      hid_t typeIN: Enumeration datatype identifier.
      const char *name,    IN: Symbol name of the enumeration datatype.
      void *value,OUT: Buffer for output of the value of the enumeration datatype.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tenum_valueof_f -
    -
    -SUBROUTINE h5tenum_valueof_f(type_id,  name, value, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name  ! Name of the enumeration datatype
    -  INTEGER, INTENT(OUT) :: value         ! Value of the enumeration datatype
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tenum_valueof_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tequal -
    Signature: -
    htri_t H5Tequal(hid_t type_id1, - hid_t type_id2 - ) -
    Purpose: -
    Determines whether two datatype identifiers refer to the same datatype. -
    Description: -
    H5Tequal determines whether two datatype identifiers - refer to the same datatype. -
    Parameters: -
      - - - - - - -
      hid_t type_id1    Identifier of datatype to compare.
      hid_t type_id2Identifier of datatype to compare.
    -
    Returns: -
    When successful, returns a positive value, for TRUE, - if the datatype identifiers refer to the same datatype, - or 0 (zero), for FALSE. - Otherwise returns a negative value. -
    Fortran90 Interface: h5tequal_f -
    -
    -SUBROUTINE h5tequal_f(type1_id, type2_id, flag, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type1_id ! Datatype identifier 
    -  INTEGER(HID_T), INTENT(IN) :: type2_id ! Datatype identifier 
    -  LOGICAL, INTENT(OUT) :: flag           ! TRUE/FALSE flag to indicate 
    -                                         ! if two datatypes are equal
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5tequal_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tfind -
    Signature: -
    H5T_conv_t H5Tfind(hid_t src_id, - hid_t dst_id, - H5T_cdata_t **pcdata - ) -
    Purpose: -
    Finds a conversion function. -
    Description: -
    H5Tfind finds a conversion function that can - handle a conversion from type src_id to type - dst_id. - The pcdata argument is a pointer to a pointer - to type conversion data which was created and initialized - by the soft type conversion function of this path when the - conversion function was installed on the path. -
    Parameters: -
      - - - - - - - - - -
      hid_t src_idIN: Identifier for the source datatype.
      hid_t dst_idIN: Identifier for the destination datatype.
      H5T_cdata_t **pcdata    OUT: Pointer to type conversion data.
    -
    Returns: -
    Returns a pointer to a suitable conversion function if successful. - Otherwise returns NULL. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Tget_array_dims -
    Signature: -
    int H5Tget_array_dims( - hid_t adtype_id, - hsize_t *dims[], - int *perm[] - ) -
    Purpose: -
    Retrieves sizes of array dimensions and dimension permutations. -
    Description: -
    H5Tget_array_dims returns the sizes of the dimensions - and the dimension permutations of the specified array datatype object. -

    - The sizes of the dimensions are returned in the array dims. - The dimension permutations, i.e., C versus FORTRAN array order, - are returned in the array perm. -

    Parameters: -
      - - - - - - - - - -
      hid_t adtype_id    IN: Datatype identifier of array object.
      hsize_t *dims[]OUT: Sizes of array dimensions.
      int *perm[]OUT: Dimension permutations.
    -
    Returns: -
    Returns the non-negative number of dimensions of the array type if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_array_dims_f -
    -
    -SUBROUTINE h5tget_array_dims_f(type_id, dims, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id      ! Identifier of the array datatype
    -  INTEGER(HSIZE_T), DIMENSION(*), INTENT(OUT) ::  dims 
    -                                             ! Buffer to store array dimension sizes
    -  INTEGER, INTENT(OUT)  :: hdferr            ! Error code
    -END SUBROUTINE h5tget_array_dims_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_array_ndims -
    Signature: -
    int H5Tget_array_ndims( - hid_t adtype_id - ) -
    Purpose: -
    Returns the rank of an array datatype. -
    Description: -
    H5Tget_array_ndims returns the rank, - the number of dimensions, of an array datatype object. -
    Parameters: -
      - - - -
      hid_t adtype_id    IN: Datatype identifier of array object.
    -
    Returns: -
    Returns the rank of the array if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_array_ndims_f -
    -
    -SUBROUTINE h5tget_array_ndims_f(type_id, ndims, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Identifier of the array datatype
    -  INTEGER, INTENT(OUT)       ::  ndims   ! Number of array dimensions
    -  INTEGER, INTENT(OUT)       :: hdferr   ! Error code
    -END SUBROUTINE h5tget_array_ndims_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_class -
    Signature: -
    H5T_class_t H5Tget_class(hid_t type_id - ) -
    Purpose: -
    Returns the datatype class identifier. -
    Description: -
    H5Tget_class returns the datatype class identifier. -

    - Valid class identifiers, as defined in H5Tpublic.h, are: -

    • H5T_INTEGER -
    • H5T_FLOAT -
    • H5T_TIME -
    • H5T_STRING -
    • H5T_BITFIELD -
    • H5T_OPAQUE -
    • H5T_COMPOUND -
    • H5T_REFERENCE -
    • H5T_ENUM -
    • H5T_VLEN -
    • H5T_ARRAY -
    -

    - Note that the library returns H5T_STRING - for both fixed-length and variable-length strings. -

    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns datatype class identifier if successful; - otherwise H5T_NO_CLASS (-1). -
    Fortran90 Interface: h5tget_class_f -
    -
    -SUBROUTINE h5tget_class_f(type_id, class, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: class          ! Datatype class, possible values are:
    -                                         !    H5T_NO_CLASS_F 
    -                                         !    H5T_INTEGER_F 
    -                                         !    H5T_FLOAT_F
    -                                         !    H5T_TIME_F
    -                                         !    H5T_STRING_F
    -                                         !    H5T_BITFIELD_F
    -                                         !    H5T_OPAQUE_F
    -                                         !    H5T_COMPOUND_F
    -                                         !    H5T_REFERENCE_F
    -                                         !    H5T_ENUM_F
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5tget_class_f
    -	
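    Example: A sketch (dtype is an assumed open datatype identifier) that branches on the returned class:

        switch (H5Tget_class(dtype)) {
            case H5T_INTEGER:   /* ... */                               break;
            case H5T_FLOAT:     /* ... */                               break;
            case H5T_STRING:    /* fixed- or variable-length string */  break;
            case H5T_COMPOUND:  /* ... */                               break;
            case H5T_NO_CLASS:  /* error */                             break;
            default:            /* remaining classes */                 break;
        }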
    - - -
    - - - -
    -
    -
    Name: H5Tget_cset -
    Signature: -
    H5T_cset_t H5Tget_cset(hid_t type_id - ) -
    Purpose: -
    Retrieves the character set type of a string datatype. -
    Description: -
    H5Tget_cset retrieves the character set type - of a string datatype. Valid character set types are: -
      -
      H5T_CSET_ASCII (0) -
      Character set is US ASCII -
    -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns a valid character set type if successful; - otherwise H5T_CSET_ERROR (-1). -
    Fortran90 Interface: h5tget_cset_f -
    -
    -SUBROUTINE h5tget_cset_f(type_id, cset, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: cset          ! Character set type of a string 
    -                                        ! datatype 
    -                                        ! Possible values of padding type are:
    -                                        !    H5T_CSET_ASCII_F = 0
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_cset_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_ebias -
    Signature: -
    size_t H5Tget_ebias(hid_t type_id - ) -
    Purpose: -
    Retrieves the exponent bias of a floating-point type. -
    Description: -
    H5Tget_ebias retrieves the exponent bias of a floating-point type. -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns the bias if successful; - otherwise 0. -
    Fortran90 Interface: h5tget_ebias_f -
    -
    -SUBROUTINE h5tget_ebias_f(type_id, ebias, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: ebias         ! Datatype exponent bias 
    -                                        ! of a floating-point type
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_ebias_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_fields -
    Signature: -
    herr_t H5Tget_fields(hid_t type_id, - size_t *spos, - size_t *epos, - size_t *esize, - size_t *mpos, - size_t *msize - ) -
    Purpose: -
    Retrieves floating point datatype bit field information. -
    Description: -
    H5Tget_fields retrieves information about the locations of the various - bit fields of a floating point datatype. The field positions are bit - positions in the significant region of the datatype. Bits are - numbered with the least significant bit number zero. - Any (or even all) of the arguments can be null pointers. -
    Parameters: -
      - - - - - - - - - - - - - - - - - - -
      hid_t type_id    IN: Identifier of datatype to query.
      size_t *sposOUT: Pointer to location to return floating-point sign bit.
      size_t *eposOUT: Pointer to location to return exponent bit-position.
      size_t *esizeOUT: Pointer to location to return size of exponent in bits.
      size_t *mposOUT: Pointer to location to return mantissa bit-position.
      size_t *msizeOUT: Pointer to location to return size of mantissa in bits.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_fields_f -
    -
    -SUBROUTINE h5tget_fields_f(type_id, epos, esize, mpos, msize, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: epos          ! Exponent bit-position 
    -  INTEGER, INTENT(OUT) :: esize         ! Size of exponent in bits
    -  INTEGER, INTENT(OUT) :: mpos          ! Mantissa bit-position 
    -  INTEGER, INTENT(OUT) :: msize         ! Size of mantissa in bits
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_fields_f
    -	
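    Example: A sketch that prints the bit-field layout of the native double type; any of the output pointers could instead be NULL if that field is not of interest (requires <stdio.h>):

        size_t spos, epos, esize, mpos, msize;

        if (H5Tget_fields(H5T_NATIVE_DOUBLE, &spos, &epos, &esize, &mpos, &msize) >= 0)
            printf("sign bit %lu, exponent at %lu (%lu bits), mantissa at %lu (%lu bits)\n",
                   (unsigned long)spos, (unsigned long)epos, (unsigned long)esize,
                   (unsigned long)mpos, (unsigned long)msize);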
    - - -
    - - - -
    -
    -
    Name: H5Tget_inpad -
    Signature: -
    H5T_pad_t H5Tget_inpad(hid_t type_id - ) -
    Purpose: -
    Retrieves the internal padding type for unused bits in floating-point datatypes. -
    Description: -
    H5Tget_inpad retrieves the internal padding type for - unused bits in floating-point datatypes. - Valid padding types are: -
      -
      H5T_PAD_ZERO (0) -
      Set background to zeros. -
      H5T_PAD_ONE (1) -
      Set background to ones. -
      H5T_PAD_BACKGROUND (2) -
      Leave background alone. -
    -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns a valid padding type if successful; - otherwise H5T_PAD_ERROR (-1). -
    Fortran90 Interface: h5tget_inpad_f -
    -
    -SUBROUTINE h5tget_inpad_f(type_id, padtype, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: padtype       ! Padding type for unused bits 
    -                                        ! in floating-point datatypes
    -                                        ! Possible values of padding type are:
    -                                        !     H5T_PAD_ZERO_F = 0
    -                                        !     H5T_PAD_ONE_F = 1
    -                                        !     H5T_PAD_BACKGROUND_F = 2
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_inpad_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_member_class -
    Signature: -
    H5T_class_t H5Tget_member_class( - hid_t cdtype_id, - unsigned member_no - ) -
    Purpose: -
    Returns datatype class of compound datatype member. -
    Description: -
    Given a compound datatype, cdtype_id, the function - H5Tget_member_class returns the datatype class of - the compound datatype member specified by member_no. -

    - Valid class identifiers are as defined in - H5Tget_class. -

    Parameters: -
      - - - - - - -
      hid_t cdtype_id    IN: Datatype identifier of compound object.
      unsigned member_noIN: Compound object member number.
    -
    Returns: -
    Returns the datatype class, a non-negative value, if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_member_class_f -
    -
    -SUBROUTINE h5tget_member_class_f(type_id, member_no, class, hdferr) 
    -  INTEGER(HID_T), INTENT(IN) :: type_id       ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: member_no            ! Member number
    -  INTEGER, INTENT(OUT) :: class               ! Member class
    -  INTEGER, INTENT(OUT) :: hdferr              ! Error code
    -END SUBROUTINE h5tget_member_class_f
    -        
    - -
    - - - -
    -
    -
    Name: H5Tget_member_index -
    Signature: -
    int H5Tget_member_index(hid_t type_id, - const char * field_name - ) -
    Purpose: -
    Retrieves the index of a compound or enumeration datatype member. -
    Description: -
    H5Tget_member_index retrieves the index of a field - of a compound datatype or an element of an enumeration datatype. -

    - The name of the target field or element is specified in - field_name. -

    - Fields are stored in no particular order - with index values of 0 through N-1, where N is - the value returned by H5Tget_nmembers. -

    Parameters: -
      - - - - - - -
      hid_t type_idIdentifier of datatype to query.
      const char * field_name    Name of the field or member whose index is to be retrieved.
    -
    Returns: -
    Returns a valid field or member index if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_member_index_f -
    -
    -SUBROUTINE h5tget_member_index_f(type_id, name, index, hdferr) 
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name   ! Member name
    -  INTEGER, INTENT(OUT) :: index          ! Member index
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5tget_member_index_f
    -	
    - -
    - - - -
    -
    -
    Name: H5Tget_member_name -
    Signature: -
    char * H5Tget_member_name(hid_t type_id, - unsigned field_idx - ) -
    Purpose: -
    Retrieves the name of a compound or enumeration datatype member. -
    Description: -
    H5Tget_member_name retrieves the name of a field - of a compound datatype or an element of an enumeration datatype. -

    - The index of the target field or element is specified in - field_idx. - Compound datatype fields and enumeration datatype elements - are stored in no particular order - with index values of 0 through N-1, where N - is the value returned by H5Tget_nmembers. -

    - A buffer to receive the name of the field is - allocated with malloc() and the caller is responsible - for freeing the memory used. -

    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to query.
      unsigned field_idxZero-based index of the field or element whose name - is to be retrieved.
    -
    Returns: -
    Returns a valid pointer to a string allocated with - malloc() if successful; - otherwise returns NULL. -
    Fortran90 Interface: h5tget_member_name_f -
    -
    -SUBROUTINE h5tget_member_name_f(type_id,index, member_name,  namelen, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id        ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: index                 ! Field index (0-based) of 
    -                                               ! the field name to retrieve 
    -  CHARACTER(LEN=*), INTENT(OUT) :: member_name ! Name of a field of
    -                                               ! a compound datatype 
    -  INTEGER, INTENT(OUT) :: namelen              ! Length of the name 
    -  INTEGER, INTENT(OUT) :: hdferr               ! Error code
    -END SUBROUTINE h5tget_member_name_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_member_offset -
    Signature: -
    size_t H5Tget_member_offset(hid_t type_id, - unsigned memb_no - ) -
    Purpose: -
    Retrieves the offset of a field of a compound datatype. -
    Description: -
    H5Tget_member_offset retrieves the - byte offset of the beginning of a field within a - compound datatype with respect to the beginning - of the compound data type datum. -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to query.
      unsigned memb_noNumber of the field whose offset is requested.
    -
    Returns: -
    Returns the byte offset of the field if successful; - otherwise returns 0 (zero). - Note that zero is a valid offset and that this function - will fail only if a call to H5Tget_member_class() - fails with the same arguments. -
    Fortran90 Interface: h5tget_member_offset_f -
    -
    -SUBROUTINE h5tget_member_offset_f(type_id, member_no, offset, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id    ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: member_no         ! Number of the field  
    -                                           ! whose offset is requested
    -  INTEGER(SIZE_T), INTENT(OUT) :: offset   ! Byte offset of the the 
    -                                           ! beginning of the field
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -END SUBROUTINE h5tget_member_offset_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_member_type -
    Signature: -
    hid_t H5Tget_member_type(hid_t type_id, - unsigned field_idx - ) -
    Purpose: -
    Returns the datatype of the specified member. -
    Description: -
    H5Tget_member_type returns the datatype of the specified member. The caller - should invoke H5Tclose() to release resources associated with the type. -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to query.
      unsigned field_idxField index (0-based) of the field type to retrieve.
    -
    Returns: -
    Returns the identifier of a copy of the datatype of the field - if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_member_type_f -
    -
    -SUBROUTINE h5tget_member_type_f(type_id,  field_idx, datatype, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id   ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: field_idx        ! Field index (0-based) of the 
    -                                          ! field type to retrieve
    -  INTEGER(HID_T), INTENT(OUT) :: datatype ! Identifier of a copy of 
    -                                          ! the datatype of the field 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -END SUBROUTINE h5tget_member_type_f
    -	
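    Example: Combined with H5Tget_nmembers, H5Tget_member_name, and H5Tget_member_offset above, a sketch that walks the fields of an open compound datatype ctype (requires <stdio.h> and <stdlib.h>):

        int i, n = H5Tget_nmembers(ctype);

        for (i = 0; i < n; i++) {
            char  *name  = H5Tget_member_name(ctype, (unsigned)i);
            size_t off   = H5Tget_member_offset(ctype, (unsigned)i);
            hid_t  ftype = H5Tget_member_type(ctype, (unsigned)i);

            printf("field %d: %s at offset %lu, size %lu\n",
                   i, name, (unsigned long)off, (unsigned long)H5Tget_size(ftype));

            H5Tclose(ftype);   /* release the copied field datatype */
            free(name);        /* name buffer was allocated with malloc() */
        }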
    - - -
    - - - -
    -
    -
    Name: H5Tget_member_value -
    Signature: -
    herr_t H5Tget_member_value(hid_t type, - unsigned memb_no, - void *value - ) -
    Purpose: -
    Returns the value of an enumeration datatype member. -
    Description: -
    H5Tget_member_value returns the value of - the enumeration datatype member memb_no. -

    - The member value is returned in a user-supplied buffer - pointed to by value. -

    Parameters: -
      - - - - - - - - - -
      hid_t typeIN: Datatype identifier for the enumeration datatype.
      unsigned memb_no,IN: Number of the enumeration datatype member.
      void *value   OUT: Pointer to a buffer for output of the - value of the enumeration datatype member.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_member_value_f -
    -
    -SUBROUTINE h5tget_member_value_f(type_id,  member_no, value, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: member_no      ! Number of the enumeration 
    -                                        ! datatype member
    -  INTEGER, INTENT(OUT) :: value         ! Value of the enumeration datatype
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_member_value_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_native_type -
    Signature: -
    hid_t H5Tget_native_type(hid_t type_id, - H5T_direction_t direction - ) -
    Purpose: -
    Returns the native datatype of a specified datatype. -
    Description: -
    H5Tget_native_type returns the equivalent native datatype - for the datatype specified in type_id. -

    - H5Tget_native_type is a high-level function designed - primarily to facilitate use of the H5Dread function, - for which users otherwise must undertake a multi-step process to - determine the native datatype of a dataset prior to reading it - into memory. - It can be used not only to determine - the native datatype for atomic datatypes, - but also to determine the native datatypes of the individual components of - a compound datatype, an enumerated datatype, an array datatype, or - a variable-length datatype. -

    - H5Tget_native_type selects the matching native datatype - from the following list: -

            H5T_NATIVE_CHAR         
    -        H5T_NATIVE_SHORT        
    -        H5T_NATIVE_INT          
    -        H5T_NATIVE_LONG         
    -        H5T_NATIVE_LLONG        
    -
    -        H5T_NATIVE_UCHAR
    -        H5T_NATIVE_USHORT
    -        H5T_NATIVE_UINT
    -        H5T_NATIVE_ULONG
    -        H5T_NATIVE_ULLONG
    -
    -        H5T_NATIVE_FLOAT
    -        H5T_NATIVE_DOUBLE
    -        H5T_NATIVE_LDOUBLE
    -

    - The direction parameter indicates the order - in which the library searches for a native datatype match. - Valid values for direction are as follows: - - - -
         - H5T_DIR_ASCEND - Searches the above list in ascending size of the datatype,
    - i.e., from top to bottom. (Default) -
      - H5T_DIR_DESCEND   - Searches the above list in descending size of the datatype,
    - i.e., from bottom to top. -
    -

    - H5Tget_native_type is designed primarily for use with integer and floating-point datatypes. Time, bitfield, opaque, and reference datatypes are returned as a copy of type_id. -

    - The identifier returned by H5Tget_native_type - should eventually be closed by calling H5Tclose - to release resources. -

    Parameters: -
      - - - - - - -
      hid_t type_idDatatype identifier for the dataset datatype.
      H5T_direction_t direction    Direction of search.
    - -
    Returns: -
    Returns the native datatype identifier for the - specified dataset datatype if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
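    Example: A sketch of the intended pairing with H5Dread; dset is an assumed open dataset identifier and buf an assumed buffer large enough for the data in the native type:

        hid_t ftype = H5Dget_type(dset);                          /* on-disk datatype */
        hid_t mtype = H5Tget_native_type(ftype, H5T_DIR_ASCEND);  /* matching native type */

        if (H5Dread(dset, mtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) {
            /* handle error */
        }

        H5Tclose(mtype);
        H5Tclose(ftype);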
    - - - -
    -
    -
    Name: H5Tget_nmembers -
    Signature: -
    int H5Tget_nmembers(hid_t type_id - ) -
    Purpose: -
    Retrieves the number of elements in a compound or enumeration datatype. -
    Description: -
    H5Tget_nmembers retrieves - the number of fields in a compound datatype or - the number of members of an enumeration datatype. -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns the number of elements if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_nmembers_f -
    -
    -SUBROUTINE h5tget_nmembers_f(type_id, num_members, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: num_members   ! Number of fields in a 
    -                                        ! compound datatype 
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_nmembers_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_norm -
    Signature: -
    H5T_norm_t H5Tget_norm(hid_t type_id - ) -
    Purpose: -
    Retrieves mantissa normalization of a floating-point datatype. -
    Description: -
    H5Tget_norm retrieves the mantissa normalization of - a floating-point datatype. Valid normalization types are: -
      -
      H5T_NORM_IMPLIED (0) -
      MSB of mantissa is not stored, always 1 -
      H5T_NORM_MSBSET (1) -
      MSB of mantissa is always 1 -
      H5T_NORM_NONE (2) -
      Mantissa is not normalized -
    -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns a valid normalization type if successful; - otherwise H5T_NORM_ERROR (-1). -
    Fortran90 Interface: h5tget_norm_f -
    -
    -SUBROUTINE h5tget_norm_f(type_id, norm, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  
    -                                 ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: norm   ! Mantissa normalization of a 
    -                                 ! floating-point datatype
    -                                 ! Valid normalization types are:
    -                                 !    H5T_NORM_IMPLIED_F(0) 
    -                                 !        MSB of mantissa is not 
    -                                 !        stored, always 1
    -                                 !    H5T_NORM_MSBSET_F(1) 
    -                                 !        MSB of mantissa is always 1 
    -                                 !    H5T_NORM_NONE_F(2) 
    -                                 !        Mantissa is not normalized
    -  INTEGER, INTENT(OUT) :: hdferr ! Error code
    -END SUBROUTINE h5tget_norm_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_offset -
    Signature: -
    int H5Tget_offset(hid_t type_id - ) -
    Purpose: -
    Retrieves the bit offset of the first significant bit. -
    Description: -
    H5Tget_offset retrieves the bit offset of the first significant bit. - The significant bits of an atomic datum can be offset from the beginning - of the memory for that datum by an amount of padding. The `offset' - property specifies the number of bits of padding that appear to the - "right of" the value. That is, if we have a 32-bit datum with 16-bits - of precision having the value 0x1122 then it will be laid out in - memory as (from small byte address toward larger byte addresses): -
    -
    Byte Position | Big-Endian Offset=0 | Big-Endian Offset=16 | Little-Endian Offset=0 | Little-Endian Offset=16
    0:            | [ pad]              | [0x11]               | [0x22]                 | [ pad]
    1:            | [ pad]              | [0x22]               | [0x11]                 | [ pad]
    2:            | [0x11]              | [ pad]               | [ pad]                 | [0x22]
    3:            | [0x22]              | [ pad]               | [ pad]                 | [0x11]
    -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns an offset value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_offset_f -
    -
    -SUBROUTINE h5tget_offset_f(type_id, offset, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: offset        ! Datatype bit offset of the
    -                                        ! first significant bit
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_offset_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_order -
    Signature: -
    H5T_order_t H5Tget_order(hid_t type_id - ) -
    Purpose: -
    Returns the byte order of an atomic datatype. -
    Description: -
    H5Tget_order returns the byte order of an - atomic datatype. -

    - Possible return values are: -

      -
      H5T_ORDER_LE (0) -
      Little endian byte ordering (default). -
      H5T_ORDER_BE (1) -
      Big endian byte ordering. -
      H5T_ORDER_VAX (2) -
      VAX mixed byte ordering (not currently supported). -
    -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns a byte order constant if successful; - otherwise H5T_ORDER_ERROR (-1). -
    Fortran90 Interface: h5tget_order_f -
    -
    -SUBROUTINE h5tget_order_f(type_id, order, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: order         ! Datatype byte order 
    -                                        ! Possible values are:
    -                                        !    H5T_ORDER_LE_F 
    -                                        !    H5T_ORDER_BE_F 
    -                                        !    H5T_ORDER_VAX_F  
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5tget_order_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_overflow -
    Signature: -
    H5T_overflow_t H5Tget_overflow(void) -
    Purpose: -
    Returns a pointer to the current global overflow function. -
    Description: -
    H5Tget_overflow returns a pointer - to the current global overflow function. - This is an application-defined function that is called whenever a - datatype conversion causes an overflow. -
    Parameters: -
    -
    None. -
    -
    Returns: -
    Returns a pointer to an application-defined function if successful. - Otherwise returns NULL; this can happen if no overflow handling - function is registered. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Tget_pad -
    Signature: -
    herr_t H5Tget_pad(hid_t type_id, - H5T_pad_t * lsb, - H5T_pad_t * msb - ) -
    Purpose: -
    Retrieves the padding type of the least and most-significant bit padding. -
    Description: -
    H5Tget_pad retrieves the padding type of the least and most-significant - bit padding. Valid types are: -
      -
      H5T_PAD_ZERO (0) -
      Set background to zeros. -
      H5T_PAD_ONE (1) -
      Set background to ones. -
      H5T_PAD_BACKGROUND (2) -
      Leave background alone. -
    -
    Parameters: -
      - - - - - - - - - -
      hid_t type_idIN: Identifier of datatype to query.
      H5T_pad_t * lsb    OUT: Pointer to location to return least-significant - bit padding type.
      H5T_pad_t * msbOUT: Pointer to location to return most-significant - bit padding type.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_pad_f -
    -
    -SUBROUTINE h5tget_pad_f(type_id, lsbpad, msbpad, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: lsbpad        ! Padding type of the  
    -                                        ! least significant bit
    -  INTEGER, INTENT(OUT) :: msbpad        ! Padding type of the 
    -                                        ! most significant bit
    -                                        ! Possible values of 
    -                                        ! padding type are:
    -                                        !    H5T_PAD_ZERO_F = 0
    -                                        !    H5T_PAD_ONE_F = 1
    -                                        !    H5T_PAD_BACKGROUND_F = 2
    -                                        !    H5T_PAD_ERROR_F = -1
    -                                        !    H5T_PAD_NPAD_F = 3
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_pad_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_precision -
    Signature: -
    size_t H5Tget_precision(hid_t type_id - ) -
    Purpose: -
    Returns the precision of an atomic datatype. -
    Description: -
    H5Tget_precision returns the precision of an atomic datatype. The - precision is the number of significant bits which, unless padding is - present, is 8 times the value returned by H5Tget_size(). -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns the number of significant bits if successful; - otherwise 0. -
    Fortran90 Interface: h5tget_precision_f -
    -
    -SUBROUTINE h5tget_precision_f(type_id, precision, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: precision     ! Datatype precision
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tget_precision_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_sign -
    Signature: -
    H5T_sign_t H5Tget_sign(hid_t type_id - ) -
    Purpose: -
    Retrieves the sign type for an integer type. -
    Description: -
    H5Tget_sign retrieves the sign type for an integer type. - Valid types are: -
      -
      H5T_SGN_NONE (0) -
      Unsigned integer type. -
      H5T_SGN_2 (1) -
      Two's complement signed integer type. -
    -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns a valid sign type if successful; - otherwise H5T_SGN_ERROR (-1). -
    Fortran90 Interface: h5tget_sign_f -
    -
    -SUBROUTINE h5tget_sign_f(type_id, sign, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: sign           ! Sign type for an integer type
    -                                         ! Possible values are:
    -                                         !    Unsigned integer type 
    -                                         !        H5T_SGN_NONE_F = 0
    -                                         !    Two's complement signed 
    -                                         !        integer type
    -                                         !        H5T_SGN_2_F = 1
    -                                         !    or error value
    -                                         !         H5T_SGN_ERROR_F = -1 
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5tget_sign_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_size -
    Signature: -
    size_t H5Tget_size(hid_t type_id - ) -
    Purpose: -
    Returns the size of a datatype. -
    Description: -
    H5Tget_size returns the size of a datatype in bytes. -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns the size of the datatype in bytes if successful; - otherwise 0. -
    Fortran90 Interface: h5tget_size_f -
    -
    -SUBROUTINE h5tget_size_f(type_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER(SIZE_T), INTENT(OUT) :: size  ! Datatype size
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -                                        ! 0 on success and -1 on failure
    -END SUBROUTINE h5tget_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_strpad -
    Signature: -
    H5T_str_t H5Tget_strpad(hid_t type_id - ) -
    Purpose: -
    Retrieves the storage mechanism for a string datatype. -
    Description: -
    H5Tget_strpad retrieves the storage mechanism - for a string datatype, as defined in - H5Tset_strpad. -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to query.
    -
    Returns: -
    Returns a valid string storage mechanism if successful; - otherwise H5T_STR_ERROR (-1). -
    Fortran90 Interface: h5tget_strpad_f -
    -
    -SUBROUTINE h5tget_strpad_f(type_id, strpad, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id    
    -                                  ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: strpad  ! String padding method for a string datatype 
    -                                  ! Possible values of padding type are:
    -                                  !    Pad with zeros (as C does): 
    -                                  !        H5T_STR_NULLPAD_F(0) 
    -                                  !    Pad with spaces (as FORTRAN does): 
    -                                  !        H5T_STR_SPACEPAD_F(1)
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -END SUBROUTINE h5tget_strpad_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tget_super -
    Signature: -
    hid_t H5Tget_super(hid_t type - ) -
    Purpose: -
    Returns the base datatype from which a datatype is derived. -
    Description: -
    H5Tget_super returns the base datatype from which the - datatype type is derived. -

    - In the case of an enumeration type, the return value is an integer type. -

    Parameters: -
      - - - -
      hid_t type    Datatype identifier for the derived datatype.
    -
    Returns: -
    Returns the datatype identifier for the base datatype if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tget_super_f -
    -
    -SUBROUTINE h5tget_super_f(type_id, base_type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id       ! Datatype identifier 
    -  INTEGER(HID_T), INTENT(OUT) :: base_type_id ! Base datatype identifier 
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5tget_super_f
    -	
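    As a sketch, the base type of an enumeration datatype can be retrieved and later released; enum_tid is assumed to identify an existing enumeration type:

#include <hdf5.h>

/* Return the integer type underlying an enumeration datatype;
 * the caller must close the returned identifier with H5Tclose. */
hid_t enum_base_type(hid_t enum_tid)
{
    hid_t base = H5Tget_super(enum_tid);
    if (base < 0) {
        /* handle error */
    }
    return base;
}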
    - - -
    - - - -
    -
    -
    Name: H5Tget_tag -
    Signature: -
    char *H5Tget_tag(hid_t type_id - ) -
    Purpose: -
    Gets the tag associated with an opaque datatype. -
    Description: -
    H5Tget_tag returns the tag associated with - the opaque datatype type_id. -

    - The tag is returned via a pointer to an - allocated string, which the caller must free. -

    Parameters: -
      - - - -
      hid_t type_id    Datatype identifier for the opaque datatype.
    -
    Returns: -
    Returns a pointer to an allocated string if successful; - otherwise returns NULL. -
    Fortran90 Interface: h5tget_tag_f -
    -
    -SUBROUTINE h5tget_tag_f(type_id, tag,taglen, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
    -  CHARACTER(LEN=*), INTENT(OUT) :: tag   ! Unique ASCII string with which the
    -                                         ! opaque datatype is to be tagged
    -  INTEGER, INTENT(OUT) :: taglen         ! Length of tag 
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5tget_tag_f
    -	
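    A short C sketch showing the required free of the returned string (show_opaque_tag is a hypothetical helper):

#include <stdio.h>
#include <stdlib.h>
#include <hdf5.h>

/* Print the tag of an opaque datatype and release the string. */
void show_opaque_tag(hid_t opaque_tid)
{
    char *tag = H5Tget_tag(opaque_tid);
    if (tag != NULL) {
        printf("opaque tag: %s\n", tag);
        free(tag);    /* the caller owns the returned string */
    }
}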
    - - -
    - - - -
    -
    -
    Name: H5Tinsert -
    Signature: -
    herr_t H5Tinsert(hid_t type_id, - const char * name, - size_t offset, - hid_t field_id - ) -
    Purpose: -
    Adds a new member to a compound datatype. -
    Description: -
    H5Tinsert adds another member to the compound datatype - type_id. The new member has a name which - must be unique within the compound datatype. - The offset argument defines the start of the member - in an instance of the compound datatype, and field_id - is the datatype identifier of the new member. -

    - Note: Members of a compound datatype do not have to be atomic datatypes; - a compound datatype can have a member which is a compound datatype. -

    Parameters: -
      - - - - - - - - - - - - -
      hid_t type_id    Identifier of compound datatype to modify.
      const char * name    Name of the field to insert.
      size_t offset    Offset in memory structure of the field to insert.
      hid_t field_id    Datatype identifier of the field to insert.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tinsert_f -
    -
    -SUBROUTINE h5tinsert_f(type_id,  name, offset, field_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name   ! Name of the field to insert
    -  INTEGER(SIZE_T), INTENT(IN) :: offset  ! Offset in memory structure 
    -                                         ! of the field to insert
    -  INTEGER(HID_T), INTENT(IN) :: field_id ! Datatype identifier of the 
    -                                         ! new member
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -END SUBROUTINE h5tinsert_f
    -	
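    For illustration, a C sketch that builds a compound datatype matching a C struct; the struct and member names are hypothetical, and HOFFSET computes each member's byte offset:

#include <hdf5.h>

typedef struct {
    int    serial;
    double temperature;
} sensor_t;

/* Create a compound datatype whose members mirror sensor_t. */
hid_t make_sensor_type(void)
{
    hid_t tid = H5Tcreate(H5T_COMPOUND, sizeof(sensor_t));
    H5Tinsert(tid, "serial",      HOFFSET(sensor_t, serial),      H5T_NATIVE_INT);
    H5Tinsert(tid, "temperature", HOFFSET(sensor_t, temperature), H5T_NATIVE_DOUBLE);
    return tid;    /* close with H5Tclose when finished */
}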
    - - -
    - - - -
    -
    -
    Name: H5Tis_variable_str -
    Signature: -
    htri_t H5Tis_variable_str(hid_t dtype_id - ) -
    Purpose: -
    Determines whether datatype is a variable-length string. -
    Description: -
    H5Tis_variable_str determines whether the datatype identified by dtype_id is a variable-length string. -

    - This function can be used to distinguish between - fixed and variable-length string datatypes. -

    Parameters: -
      - - - -
      hid_t dtype_id    Datatype identifier.
    -
    Returns: -
    Returns TRUE or FALSE if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tis_variable_str_f -
    -
    -SUBROUTINE h5tis_variable_str_f(type_id, status, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id   ! Datatype identifier 
    -  LOGICAL, INTENT(OUT)       :: status    ! Logical flag:
    -                                          !    .TRUE. if datatype is a 
    -                                          !         variable-length string
    -                                          !    .FALSE. otherwise 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -END SUBROUTINE h5tis_variable_str_f
    -	
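    A sketch that distinguishes fixed-length and variable-length strings (describe_string_type is a hypothetical helper):

#include <stdio.h>
#include <hdf5.h>

/* Report whether a string datatype is fixed- or variable-length. */
void describe_string_type(hid_t dtype_id)
{
    if (H5Tget_class(dtype_id) == H5T_STRING) {
        htri_t vl = H5Tis_variable_str(dtype_id);
        if (vl > 0)
            printf("variable-length string\n");
        else if (vl == 0)
            printf("fixed-length string, %lu bytes\n",
                   (unsigned long)H5Tget_size(dtype_id));
        else
            printf("query failed\n");
    }
}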
    - - -
    - - - -
    -
    -
    Name: H5Tlock -
    Signature: -
    herr_t H5Tlock(hid_t type_id - ) -
    Purpose: -
    Locks a datatype. -
    Description: -
    H5Tlock locks the datatype specified by the - type_id identifier, making it read-only and - non-destructible. This is normally done by the library for - predefined datatypes so the application does not - inadvertently change or delete a predefined type. - Once a datatype is locked it can never be unlocked. -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to lock.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Topen -
    Signature: -
    hid_t H5Topen(hid_t loc_id, const char *name) -
    Purpose: -
    Opens a named datatype. -
    Description: -
    H5Topen opens a named datatype at the location - specified by loc_id and returns an identifier - for the datatype. loc_id is either a file or - group identifier. The identifier should eventually be closed - by calling H5Tclose to release resources. -
    Parameters: -
      - - - - - - -
      hid_t loc_id    IN: A file or group identifier.
      const char * name    IN: A datatype name, defined within the file - or group identified by loc_id.
    -
    Returns: -
    Returns a named datatype identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5topen_f -
    -
    -SUBROUTINE h5topen_f(loc_id, name, type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: loc_id    ! File or group identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: name    ! Datatype name within file or
    -                                          ! group
    -  INTEGER(HID_T), INTENT(out) :: type_id  ! Datatype identifier 
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5topen_f
    -	
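    As a sketch, opening a named datatype previously committed to a file; the name "/sensor_t" is hypothetical:

#include <hdf5.h>

/* Open a named datatype stored in the root group of an open file. */
hid_t open_named_type(hid_t file_id)
{
    hid_t tid = H5Topen(file_id, "/sensor_t");
    /* ... use tid, for example in a call to H5Dcreate ... */
    return tid;    /* release later with H5Tclose(tid) */
}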
    - - -
    - - - -
    -
    -
    Name: H5Tpack -
    Signature: -
    herr_t H5Tpack(hid_t type_id - ) -
    Purpose: -
    Recursively removes padding from within a compound datatype. -
    Description: -
    H5Tpack recursively removes padding from within a compound - datatype to make it more efficient (space-wise) to store that data. -
    Parameters: -
      - - - -
      hid_t type_id    Identifier of datatype to modify.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tpack_f -
    -
    -SUBROUTINE h5tpack_f(type_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tpack_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tregister -
    Signature: -
    herr_t H5Tregister(H5T_pers_t pers, - const char * name, - hid_t src_id, - hid_t dst_id, - H5T_conv_t func - ) -
    Purpose: -
    Registers a conversion function. -
    Description: -
    H5Tregister registers a hard or soft conversion function - for a datatype conversion path. -

    - The parameter pers indicates whether a conversion function - is hard (H5T_PERS_HARD) - or soft (H5T_PERS_SOFT). -

    - A conversion path can have only one hard function. - When pers is H5T_PERS_HARD, - func replaces any previous hard function. - If pers is H5T_PERS_HARD and - func is the null pointer, then any hard function - registered for this path is removed. -

    - When pers is H5T_PERS_SOFT, - H5Tregister - adds the function to the end of the master soft list and replaces - the soft function in all applicable existing conversion paths. - Soft functions are used when determining which conversion function - is appropriate for this path. -

    - The name is used only for debugging and should be a - short identifier for the function. -

    - The path is specified by the source and destination datatypes - src_id and dst_id. - For soft conversion functions, only the class of these types is important. -

    - The type of the conversion function pointer is declared as: -

    -
    typedef herr_t (*H5T_conv_t) (hid_t src_id, 
    -                              hid_t dst_id, 
    -                              H5T_cdata_t *cdata,
    -                              size_t nelmts, 
    -                              size_t buf_stride, 
    -                              size_t bkg_stride, 
    -                              void *buf, 
    -                              void *bkg,
    -                              hid_t dset_xfer_plist)
    -
    -

    - The H5T_cdata_t struct is declared as: -

    -
    typedef struct H5T_cdata_t {
    -    H5T_cmd_t  command;
    -    H5T_bkg_t  need_bkg;
    -    hbool_t   *recalc;
    -    void      *priv;
    -} H5T_cdata_t;
    -
    -

    - The H5T_conv_t parameters and - the elements of the H5T_cdata_t struct - are described more fully in the - “Data Conversion” - section of  “The Datatype Interface (H5T)” - in the HDF5 User's Guide. -

    Parameters: -
      - - - - - - - - - - - - - - - -
      H5T_pers_t pers    H5T_PERS_HARD for hard conversion functions; - H5T_PERS_SOFT for soft conversion functions.
      const char * name    Name displayed in diagnostic output.
      hid_t src_id    Identifier of source datatype.
      hid_t dst_id    Identifier of destination datatype.
      H5T_conv_t func    Function to convert between source and destination datatypes.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
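    For illustration, a skeleton of a soft conversion function with the H5T_conv_t signature above, followed by a commented-out registration call; the function name and the chosen source/destination types are hypothetical:

#include <hdf5.h>

static herr_t
my_conv(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
        size_t nelmts, size_t buf_stride, size_t bkg_stride,
        void *buf, void *bkg, hid_t dset_xfer_plist)
{
    switch (cdata->command) {
        case H5T_CONV_INIT:  /* check that src/dst are handled here */ break;
        case H5T_CONV_CONV:  /* convert nelmts elements in buf      */ break;
        case H5T_CONV_FREE:  /* free any private resources          */ break;
        default:             return -1;
    }
    return 0;
}

/* Registration as a soft function for an integer-to-integer path:
 *   H5Tregister(H5T_PERS_SOFT, "my_conv",
 *               H5T_NATIVE_INT, H5T_NATIVE_LONG, my_conv);
 */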
    - - - -
    -
    -
    Name: H5Tset_cset -
    Signature: -
    herr_t H5Tset_cset(hid_t type_id, - H5T_cset_t cset - ) -
    Purpose: -
    Sets character set to be used. -
    Description: -
    H5Tset_cset sets the character set to be used. -

    - HDF5 is able to distinguish between character sets of different - nationalities and to convert between them to the extent possible. - Valid character set types are: -

      -
      H5T_CSET_ASCII (0) -
      Character set is US ASCII. -
    -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to modify.
      H5T_cset_t cset    Character set type.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_cset_f -
    -
    -SUBROUTINE h5tset_cset_f(type_id, cset, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id 
    -                                  ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: cset     ! Character set type of a string datatype  
    -                                  ! Possible values of padding type are:
    -                                  !    H5T_CSET_ASCII_F = 0
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -END SUBROUTINE h5tset_cset_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_ebias -
    Signature: -
    herr_t H5Tset_ebias(hid_t type_id, - size_t ebias - ) -
    Purpose: -
    Sets the exponent bias of a floating-point type. -
    Description: -
    H5Tset_ebias sets the exponent bias of a floating-point type. -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      size_t ebias    Exponent bias value.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_ebias_f -
    -
    -SUBROUTINE h5tset_ebias_f(type_id, ebias, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: ebias          ! Datatype exponent bias 
    -                                        ! of a floating-point type, 
    -                                        ! which cannot be 0
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tset_ebias_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_fields -
    Signature: -
    herr_t H5Tset_fields(hid_t type_id, - size_t spos, - size_t epos, - size_t esize, - size_t mpos, - size_t msize - ) -
    Purpose: -
    Sets locations and sizes of floating point bit fields. -
    Description: -
    H5Tset_fields sets the locations and sizes of the various - floating-point bit fields. The field positions are bit positions in the - significant region of the datatype. Bits are numbered with the least - significant bit number zero. - -

    Fields are not allowed to extend beyond the number of bits of - precision, nor are they allowed to overlap with one another. -

    Parameters: -
      - - - - - - - - - - - - - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      size_t spos    Sign position, i.e., the bit offset of the floating-point sign bit.
      size_t epos    Exponent bit position.
      size_t esize    Size of exponent in bits.
      size_t mpos    Mantissa bit position.
      size_t msize    Size of mantissa in bits.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_fields_f -
    -
    -SUBROUTINE h5tset_fields_f(type_id, epos, esize, mpos, msize, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier
    -  INTEGER, INTENT(IN) :: epos           ! Exponent bit-position 
    -  INTEGER, INTENT(IN) :: esize          ! Size of exponent in bits
    -  INTEGER, INTENT(IN) :: mpos           ! Mantissa bit-position 
    -  INTEGER, INTENT(IN) :: msize          ! Size of mantissa in bits
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tset_fields_f
    -	
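    A sketch of assembling a 16-bit floating-point layout (1 sign bit, 5 exponent bits, 10 mantissa bits, bias 15); the helper name and the exact call order are illustrative and may need adjustment:

#include <hdf5.h>

/* Derive a half-precision-like layout from a 32-bit IEEE type. */
hid_t make_float16(void)
{
    hid_t tid = H5Tcopy(H5T_IEEE_F32LE);

    H5Tset_fields(tid, 15,        /* sign bit position       */
                       10, 5,     /* exponent position, size */
                       0, 10);    /* mantissa position, size */
    H5Tset_precision(tid, 16);    /* 16 significant bits     */
    H5Tset_size(tid, 2);          /* 2 bytes total           */
    H5Tset_ebias(tid, 15);        /* exponent bias           */
    return tid;
}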
    - - -
    - - - -
    -
    -
    Name: H5Tset_inpad -
    Signature: -
    herr_t H5Tset_inpad(hid_t type_id, - H5T_pad_t inpad - ) -
    Purpose: -
    Fills unused internal floating point bits. -
    Description: -
    If any internal bits of a floating point type are unused (that is, those significant bits which are not part of the sign, exponent, or mantissa), then H5Tset_inpad fills them according to the value of the padding value property inpad. Valid padding types are: -
      -
      H5T_PAD_ZERO (0) -
      Set background to zeros. -
      H5T_PAD_ONE (1) -
      Set background to ones. -
      H5T_PAD_BACKGROUND (2) -
      Leave background alone. -
    -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to modify.
      H5T_pad_t pad    Padding type.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_inpad_f -
    -
    -SUBROUTINE h5tset_inpad_f(type_id, padtype, hdferr)
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id 
    -                                  ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: padtype  ! Padding type for unused bits 
    -                                  ! in floating-point datatypes.
    -                                  ! Possible values of padding type are:
    -                                  !    H5T_PAD_ZERO_F = 0
    -                                  !    H5T_PAD_ONE_F = 1
    -                                  !    H5T_PAD_BACKGROUND_F = 2
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -END SUBROUTINE h5tset_inpad_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_norm -
    Signature: -
    herr_t H5Tset_norm(hid_t type_id, - H5T_norm_t norm - ) -
    Purpose: -
    Sets the mantissa normalization of a floating-point datatype. -
    Description: -
    H5Tset_norm sets the mantissa normalization of - a floating-point datatype. Valid normalization types are: -
      -
      H5T_NORM_IMPLIED (0) -
      MSB of mantissa is not stored, always 1 -
      H5T_NORM_MSBSET (1) -
      MSB of mantissa is always 1 -
      H5T_NORM_NONE (2) -
      Mantissa is not normalized -
    -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      H5T_norm_t norm    Mantissa normalization type.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_norm_f -
    -
    -SUBROUTINE h5tset_norm_f(type_id, norm, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  
    -                                  ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: norm     ! Mantissa normalization of a 
    -                                  ! floating-point datatype
    -                                  ! Valid normalization types are:
    -                                  !    H5T_NORM_IMPLIED_F(0)
    -                                  !       MSB of mantissa is not stored,
    -                                  !       always 1 
    -                                  !    H5T_NORM_MSBSET_F(1) 
    -                                  !       MSB of mantissa is always 1 
    -                                  !    H5T_NORM_NONE_F(2)
    -                                  !       Mantissa is not normalized
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -END SUBROUTINE h5tset_norm_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_offset -
    Signature: -
    herr_t H5Tset_offset(hid_t type_id, - size_t offset - ) -
    Purpose: -
    Sets the bit offset of the first significant bit. -
    Description: -
    H5Tset_offset sets the bit offset of the first significant bit. The - significant bits of an atomic datum can be offset from the beginning of - the memory for that datum by an amount of padding. The `offset' - property specifies the number of bits of padding that appear to the - "right of" the value. That is, if we have a 32-bit datum with 16-bits - of precision having the value 0x1122 then it will be laid out in - memory as (from small byte address toward larger byte addresses): -
    -
    Byte Position    Big-Endian      Big-Endian      Little-Endian   Little-Endian
                     Offset=0        Offset=16       Offset=0        Offset=16
    0:               [ pad]          [0x11]          [0x22]          [ pad]
    1:               [ pad]          [0x22]          [0x11]          [ pad]
    2:               [0x11]          [ pad]          [ pad]          [0x22]
    3:               [0x22]          [ pad]          [ pad]          [0x11]
    - -

    If the offset is incremented then the total size is -incremented also if necessary to prevent significant bits of -the value from hanging over the edge of the datatype. - -

    The offset of an H5T_STRING cannot be set to anything but -zero. -

    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      size_t offset    Offset of first significant bit.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_offset_f -
    -
    -SUBROUTINE h5tset_offset_f(type_id, offset, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: offset         ! Datatype bit offset of 
    -                                        ! the first significant bit
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tset_offset_f
    -	
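    A sketch corresponding to the layout discussed above: a 32-bit container holding 16 significant bits, offset 8 bits from the least significant end (make_padded_int is a hypothetical helper):

#include <hdf5.h>

hid_t make_padded_int(void)
{
    hid_t tid = H5Tcopy(H5T_STD_I32LE);
    H5Tset_precision(tid, 16);                    /* 16 value bits     */
    H5Tset_offset(tid, 8);                        /* 8 pad bits below  */
    H5Tset_pad(tid, H5T_PAD_ZERO, H5T_PAD_ZERO);  /* zero-fill padding */
    return tid;
}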
    - - -
    - - - -
    -
    -
    Name: H5Tset_order -
    Signature: -
    herr_t H5Tset_order(hid_t type_id, H5T_order_t order ) -
    Purpose: -
    Sets the byte ordering of an atomic datatype. -
    Description: -
    H5Tset_order sets the byte ordering of an atomic datatype. - Byte orderings currently supported are: -
      -
      H5T_ORDER_LE (0) -
      Little-endian byte ordering (default). -
      H5T_ORDER_BE (1) -
      Big-endian byte ordering. -
      H5T_ORDER_VAX (2) -
      VAX mixed byte ordering (not currently supported). -
    -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      H5T_order_t order    Byte ordering constant.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_order_f -
    -
    -SUBROUTINE h5tset_order_f(type_id, order, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id   ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: order            ! Datatype byte order 
    -                                          ! Possible values are:
    -                                          !    H5T_ORDER_LE_F 
    -                                          !    H5T_ORDER_BE_F 
    -                                          !    H5T_ORDER_VAX_F  
    -  INTEGER, INTENT(OUT) :: hdferr          ! Error code
    -                                          ! 0 on success and -1 on failure
    -END SUBROUTINE h5tset_order_f
    -	
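    For example, a native integer type can be copied and forced to big-endian byte order for writing portable data:

#include <hdf5.h>

hid_t make_bigendian_int(void)
{
    hid_t tid = H5Tcopy(H5T_NATIVE_INT);
    H5Tset_order(tid, H5T_ORDER_BE);
    return tid;    /* on most platforms the predefined H5T_STD_I32BE is equivalent */
}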
    - - -
    - - - -
    -
    -
    Name: H5Tset_overflow -
    Signature: -
    herr_t H5Tset_overflow(H5T_overflow_t func) -
    Purpose: -
    Sets the overflow handler to a specified function. -
    Description: -
    H5Tset_overflow sets the overflow handler - to be the function specified by func. - func will be called for all datatype conversions that - result in an overflow. -

    - See the definition of H5T_overflow_t in - H5Tpublic.h for documentation - of arguments and return values. - The prototype for H5T_overflow_t is as follows:
    - herr_t (*H5T_overflow_t)(hid_t src_id, hid_t dst_id, - void *src_buf, void *dst_buf); - -

    - The NULL pointer may be passed to remove the overflow handler. -

    Parameters: -
      - - - -
      H5T_overflow_t func    Overflow function.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Tset_pad -
    Signature: -
    herr_t H5Tset_pad(hid_t type_id, - H5T_pad_t lsb, - H5T_pad_t msb - ) -
    Purpose: -
    Sets the least and most-significant bits padding types. -
    Description: -
    H5Tset_pad sets the least and most-significant bits padding types. -
      -
      H5T_PAD_ZERO (0) -
      Set background to zeros. -
      H5T_PAD_ONE (1) -
      Set background to ones. -
      H5T_PAD_BACKGROUND (2) -
      Leave background alone. -
    -
    Parameters: -
      - - - - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      H5T_pad_t lsb    Padding type for least-significant bits.
      H5T_pad_t msb    Padding type for most-significant bits.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_pad_f -
    -
    -SUBROUTINE h5tset_pad_f(type_id, lsbpad, msbpad, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: lsbpad         ! Padding type of the 
    -                                        ! least significant bit
    -  INTEGER, INTENT(IN) :: msbpad         ! Padding type of the 
    -                                        ! most significant bit
    -                                        ! Possible values of padding 
    -                                        ! type are:
    -                                        !    H5T_PAD_ZERO_F = 0
    -                                        !    H5T_PAD_ONE_F = 1
    -                                        !    H5T_PAD_BACKGROUND_F = 2
    -                                        !    H5T_PAD_ERROR_F = -1
    -                                        !    H5T_PAD_NPAD_F = 3
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tset_pad_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_precision -
    Signature: -
    herr_t H5Tset_precision(hid_t type_id, size_t precision ) -
    Purpose: -
    Sets the precision of an atomic datatype. -
    Description: -
    H5Tset_precision sets the precision of an atomic datatype. The precision is the number of significant bits which, unless padding is present, is 8 times the value returned by H5Tget_size(). -

    If the precision is increased then the offset is decreased and then - the size is increased to ensure that significant bits do not "hang - over" the edge of the datatype. -

    Changing the precision of an H5T_STRING automatically changes the - size as well. The precision must be a multiple of 8. -

    When decreasing the precision of a floating point type, set the - locations and sizes of the sign, mantissa, and exponent fields - first. -

    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      size_t precision    Number of bits of precision for datatype.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_precision_f -
    -
    -SUBROUTINE h5tset_precision_f(type_id, precision, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: precision      ! Datatype precision
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tset_precision_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_sign -
    Signature: -
    herr_t H5Tset_sign(hid_t type_id, - H5T_sign_t sign - ) -
    Purpose: -
    Sets the sign property for an integer type. -
    Description: -
    H5Tset_sign sets the sign property for an integer type. -
    -
    H5T_SGN_NONE (0) -
    Unsigned integer type. -
    H5T_SGN_2 (1) -
    Two's complement signed integer type. -
    -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to set.
      H5T_sign_t sign    Sign type.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_sign_f -
    -
    -SUBROUTINE h5tset_sign_f(type_id, sign, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  
    -                                  ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: sign     ! Sign type for an integer type 
    -                                  ! Possible values are:
    -                                  !    Unsigned integer type 
    -                                  !       H5T_SGN_NONE_F = 0
    -                                  !    Two's complement signed integer type
    -                                  !       H5T_SGN_2_F = 1
    -                                  !    or error value 
    -                                  !       H5T_SGN_ERROR_F=-1 
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -END SUBROUTINE h5tset_sign_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_size -
    Signature: -
    herr_t H5Tset_size(hid_t type_id, size_t size ) -
    Purpose: -
    Sets the total size for an atomic datatype. -
    Description: -
    H5Tset_size sets the total size in bytes, - size, for a datatype. If the datatype is atomic and size - is decreased so that the significant bits of the datatype extend beyond - the edge of the new size, then the `offset' property is decreased - toward zero. If the `offset' becomes zero and the significant - bits of the datatype still hang over the edge of the new size, then - the number of significant bits is decreased. - The size set for a string should include space for the null-terminator - character, otherwise it will not be stored on (or retrieved from) disk. - Adjusting the size of a string automatically sets the precision - to 8*size. A compound datatype may increase in size, - but may not shrink. All datatypes must have a positive size. -
    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to change size.
      size_t size    Size in bytes to modify datatype.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_size_f -
    -
    -SUBROUTINE h5tset_size_f(type_id, size, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id  ! Datatype identifier 
    -  INTEGER(SIZE_T), INTENT(IN) :: size    ! Datatype size
    -  INTEGER, INTENT(OUT) :: hdferr         ! Error code
    -                                         ! 0 on success and -1 on failure
    -END SUBROUTINE h5tset_size_f
    -	
    - - -
    - - - -
    -
    -
    Name: H5Tset_strpad -
    Signature: -
    herr_t H5Tset_strpad(hid_t type_id, - H5T_str_t strpad - ) -
    Purpose: -
    Defines the storage mechanism for character strings. -
    Description: -
    H5Tset_strpad defines the storage mechanism for the string. -

    - The method used to store character strings differs with the - programming language: -

      -
    • C usually null terminates strings while -
    • Fortran left-justifies and space-pads strings. -
    - Valid string padding values, as passed in the parameter - strpad, are as follows: -
      -
      H5T_STR_NULLTERM (0) -
      Null terminate (as C does) -
      H5T_STR_NULLPAD (1) -
      Pad with zeros -
      H5T_STR_SPACEPAD (2) -
      Pad with spaces (as FORTRAN does) -
    -

    - When converting from a longer string to a shorter string, - the behavior is as follows. - If the short string is H5T_STR_NULLPAD or - H5T_STR_SPACEPAD, then the string is simply truncated. - If the short string is H5T_STR_NULLTERM, it is - truncated and a null terminator is appended. -

    - When converting from a shorter string to a longer string, - the long string is padded on the end by appending nulls or spaces. - - -

    Parameters: -
      - - - - - - -
      hid_t type_id    Identifier of datatype to modify.
      H5T_str_t strpad    String padding type.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_strpad_f -
    -
    -SUBROUTINE h5tset_strpad_f(type_id, strpad, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id 
    -                                 ! Datatype identifier 
    -  INTEGER, INTENT(IN) :: strpad  ! String padding method for a string datatype 
    -                                 ! Possible values of padding type are:
    -                                 !    Pad with zeros (as C does): 
    -                                 !       H5T_STR_NULLPAD_F(0)
    -                                 !    Pad with spaces (as FORTRAN does): 
    -                                 !       H5T_STR_SPACEPAD_F(1)
    -  INTEGER, INTENT(OUT) :: hdferr ! Error code
    -END SUBROUTINE h5tset_strpad_f
    -	
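    A sketch that builds a fixed-length, null-terminated C string datatype sized for a given string (make_string_type is a hypothetical helper):

#include <string.h>
#include <hdf5.h>

hid_t make_string_type(const char *example)
{
    hid_t tid = H5Tcopy(H5T_C_S1);
    H5Tset_size(tid, strlen(example) + 1);    /* include room for '\0' */
    H5Tset_strpad(tid, H5T_STR_NULLTERM);     /* C-style storage       */
    return tid;
}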
    - - -
    - - - -
    -
    -
    Name: H5Tset_tag -
    Signature: -
    herr_t H5Tset_tag(hid_t type_id, const char *tag ) -
    Purpose: -
    Tags an opaque datatype. -
    Description: -
    H5Tset_tag tags an opaque datatype type_id - with a descriptive ASCII identifier, tag. -
    Parameters: -
      - - - - - - -
      hid_t type_id    IN: Datatype identifier for the opaque datatype to be tagged.
      const char *tag    IN: Descriptive ASCII string with which the - opaque datatype is to be tagged.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tset_tag_f -
    -
    -SUBROUTINE h5tset_tag_f(type_id, tag, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier 
    -  CHARACTER(LEN=*), INTENT(IN) :: tag   ! Unique ASCII string with which the
    -                                        ! opaque datatype is to be tagged 
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -END SUBROUTINE h5tset_tag_f
    -	
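    A sketch creating an 8-byte opaque datatype and tagging it; the tag string is arbitrary and hypothetical:

#include <hdf5.h>

hid_t make_opaque_type(void)
{
    hid_t tid = H5Tcreate(H5T_OPAQUE, 8);     /* 8 bytes per element   */
    H5Tset_tag(tid, "raw sensor frame");      /* descriptive ASCII tag */
    return tid;
}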
    - - -
    - - - -
    -
    -
    Name: H5Tunregister -
    Signature: -
    herr_t H5Tunregister(H5T_conv_t func - ) -
    Purpose: -
    Removes a conversion function from all conversion paths. -
    Description: -
    H5Tunregister removes a conversion function from all conversion paths. -

    - The conversion function pointer type declaration is described in - H5Tregister. -

    Parameters: -
      - - - -
      H5T_conv_t func    Function to remove from conversion paths.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
    - - - -
    -
    -
    Name: H5Tvlen_create -
    Signature: -
    hid_t H5Tvlen_create(hid_t base_type_id - ) -
    Purpose: -
    Creates a new variable-length datatype. -
    Description: -
    H5Tvlen_create creates a new variable-length (VL) datatype. -

    - The base datatype will be the datatype that the sequence is composed of, - characters for character strings, vertex coordinates for polygon lists, etc. - The base type specified for the VL datatype can be of any HDF5 datatype, - including another VL datatype, a compound datatype or an atomic datatype. -

    - When necessary, use H5Tget_super to determine the base type - of the VL datatype. -

    - The datatype identifier returned from this function should be - released with H5Tclose or resource leaks will result. -

    Parameters: -
      - - - -
      hid_t base_type_id    Base type of datatype to create.
    -
    See Also: -
    H5Dget_vlen_buf_size -
    H5Dvlen_reclaim -
    Returns: -
    Returns datatype identifier if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5tvlen_create_f -
    -
    -SUBROUTINE h5tvlen_create_f(type_id, vltype_id, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER(HID_T), INTENT(IN) :: type_id    ! Datatype identifier of base type 
    -                                           ! Base type can only be atomic 
    -  INTEGER(HID_T), INTENT(OUT) :: vltype_id ! VL datatype identifier 
    -  INTEGER, INTENT(OUT) :: hdferr           ! Error code
    -END SUBROUTINE h5tvlen_create_f
    -	
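    A sketch creating a variable-length datatype of integers; the comment shows the hvl_t element layout used in memory when reading or writing such data:

#include <hdf5.h>

hid_t make_vl_int_type(void)
{
    hid_t vltid = H5Tvlen_create(H5T_NATIVE_INT);
    /* In memory, each element is an hvl_t:
     *     hvl_t elem;
     *     elem.len = 3;        number of ints in this element
     *     elem.p   = int_ptr;  pointer to the element's data
     */
    return vltid;    /* release with H5Tclose when finished */
}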
    - - -
    - -
    -
    - - - -
diff --git a/doc/html/RM_H5Z.html b/doc/html/RM_H5Z.html
deleted file mode 100644
index 0d46864..0000000
--- a/doc/html/RM_H5Z.html
+++ /dev/null
@@ -1,655 +0,0 @@
    -HDF5/H5Z API Specification
    -
    - - - -
    -

    H5Z: Filter and Compression Interface

    -
    - -

    Filter and Compression API Functions

    - -These functions enable the user to configure new filters -for the local environment. - - - -
    - -       - -       - -
    - -
    -The FORTRAN90 Interfaces: -
    -In general, each FORTRAN90 subroutine performs exactly the same task -as the corresponding C function. -
    - - - -
    - -       - -       - -
    - -

    -HDF5 supports a filter pipeline that provides the capability for standard -and customized raw data processing during I/O operations. -HDF5 is distributed with a small set of standard filters such as -compression (gzip, SZIP, and a shuffling algorithm) and -error checking (Fletcher32 checksum). -For further flexibility, the library allows a -user application to extend the pipeline through the -creation and registration of customized filters. -

    -The flexibility of the filter pipeline implementation enables the -definition of additional filters by a user application. -A filter -

      -
    • is associated with a dataset when the dataset is created, -
    • can be used only with chunked data -
      (i.e., datasets stored in the H5D_CHUNKED - storage layout), and -
    • is applied independently to each chunk of the dataset. -
    -

    -The HDF5 library does not support filters for contiguous datasets -because of the difficulty of implementing random access for partial I/O. -Compact dataset filters are not supported because they would not produce -significant results. -

    -Filter identifiers for the filters distributed with the HDF5 Library -are as follows: - - - -
    - H5Z_FILTER_DEFLATE    The gzip compression, or deflation, filter -
    - H5Z_FILTER_SZIP    The SZIP compression filter -
    - H5Z_FILTER_SHUFFLE    The shuffle algorithm filter -
    - H5Z_FILTER_FLETCHER32    The Fletcher32 checksum, or error checking, filter -
    -Custom filters that have been registered with the library will have -additional unique identifiers. -

    -See The Dataset Interface (H5D) -in the HDF5 User's Guide for further information regarding -data compression. - - - - - -


    -
    -
    Name: H5Zfilter_avail -
    Signature: -
    herr_t H5Zfilter_avail(H5Z_filter_t filter) -
    Purpose: -
    Determines whether a filter is available. -
    Description: -
    H5Zfilter_avail determines whether the filter - specified in filter is available to the application. -
    Parameters: -
      - - - -
      H5Z_filter_t filter    IN: Filter identifier. - See the introduction to this section of the reference manual - for a list of valid filter identifiers.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5zfilter_avail_f -
    -
    -SUBROUTINE h5zfilter_avail_f(filter, status, hdferr)
    -  IMPLICIT NONE
    -  INTEGER, INTENT(IN)  :: filter     ! Filter
    -                                     ! Valid values are:
    -                                     !    H5Z_FILTER_DEFLATE_F
    -                                     !    H5Z_FILTER_SHUFFLE_F
    -                                     !    H5Z_FILTER_FLETCHER32_F
    -                                     !    H5Z_FILTER_SZIP_F
    -  LOGICAL, INTENT(OUT) :: status     ! Flag indicating whether 
    -                                     ! filter is available: 
    -                                     !    .TRUE.
    -                                     !    .FALSE.
    -END SUBROUTINE h5zfilter_avail_f
    -	
    - - -
    - - - - -
    -
    -
    Name: H5Zget_filter_info -
    Signature: -
    herr_t - H5Zget_filter_info( - H5Z_filter_t filter, - unsigned int *filter_config_flags - ) -
    Purpose: -
    Retrieves information about a filter. -
    Description: -
    - H5Zget_filter_info retrieves information about a filter. - At present, this means that the function retrieves a - filter's configuration flags, indicating whether the filter is - configured to decode data, to encode data, neither, or both. -

    - If filter_config_flags is not set to NULL - prior to the function call, the returned parameter contains a - bit field specifying the available filter configuration. - The configuration flag values can then be determined through - a series of bitwise AND operations, as described below. -

    - Valid filter configuration flags include the following: - - - - - - - - - -
     H5Z_FILTER_CONFIG_ENCODE_ENABLED    Encoding is enabled for this filter -
     H5Z_FILTER_CONFIG_DECODE_ENABLED    Decoding is enabled for this filter -
     (These flags - are defined in the HDF5 Library source code file - H5Zpublic.h.) -
    - A bitwise AND of the returned - filter_config_flags and a valid - filter configuration flag will reveal whether - the related configuration option is available. - For example, if the value of -
    -      - H5Z_FILTER_CONFIG_ENCODE_ENABLED - & - filter_config_flags -
    - is true, i.e., greater than 0 (zero), - the queried filter is configured to encode data; - if the value is FALSE, - i.e., equal to 0 (zero), - the filter is not so configured. -

    - If a filter is not encode-enabled, the corresponding - H5Pset_* function will return an error if the - filter is added to a dataset creation property list (which is - required if the filter is to be used to encode that dataset). - For example, if the H5Z_FILTER_CONFIG_ENCODE_ENABLED - flag is not returned for the SZIP filter, - H5Z_FILTER_SZIP, a call to H5Pset_szip - will fail. -

    - If a filter is not decode-enabled, the application will not be - able to read an existing file encoded with that filter. -

    - This function should be called, and the returned - filter_config_flags analyzed, before calling - any other function, such as H5Pset_szip, - that might require a particular filter configuration. - -

    Parameters: -
    -
    H5Z_filter_t filter -
    IN: Identifier of the filter to query. - See the introduction to this section of the reference manual - for a list of valid filter identifiers. -
    unsigned int *filter_config_flags -
    OUT: A bit field encoding the returned filter information -
    -
    Returns: -
    Returns a non-negative value on success, - a negative value on failure. - - -
    Fortran90 Interface: -
    -
    -SUBROUTINE h5zget_filter_info_f(filter, config_flags, hdferr)
    -
    -  IMPLICIT NONE
    -  INTEGER, INTENT(IN)  :: filter        ! Filter, may be one of the
    -                                        ! following:
    -                                        !     H5Z_FILTER_DEFLATE_F
    -                                        !     H5Z_FILTER_SHUFFLE_F
    -                                        !     H5Z_FILTER_FLETCHER32_F
    -                                        !     H5Z_FILTER_SZIP_F
    -  INTEGER, INTENT(OUT) :: config_flags  ! Bit field indicating whether
    -                                        ! a filter's encoder and/or
    -                                        ! decoder are available
    -  INTEGER, INTENT(OUT) :: hdferr        ! Error code
    -
    -END SUBROUTINE h5zget_filter_info_f
    -    
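    A sketch of the recommended check before enabling SZIP compression on a dataset creation property list (szip_can_encode is a hypothetical helper):

#include <hdf5.h>

/* Return non-zero if the SZIP filter is configured to encode data. */
int szip_can_encode(void)
{
    unsigned int flags = 0;

    if (H5Zget_filter_info(H5Z_FILTER_SZIP, &flags) < 0)
        return 0;                 /* the query itself failed */
    return (flags & H5Z_FILTER_CONFIG_ENCODE_ENABLED) != 0;
}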
    - -
    - - - - -
    -
    -
    Name: H5Zregister -
    Signature: -
    herr_t H5Zregister(const H5Z_class_t filter_class) -
    Purpose: -
    Registers new filter. -
    Description: -
    H5Zregister registers a new filter with the - HDF5 library. -

    - Making a new filter available to an application is a two-step - process. The first step is to write - the three filter callback functions described below: - can_apply_func, set_local_func, and - filter_func. - This call to H5Zregister, - registering the filter with the - library, is the second step. - The can_apply_func and set_local_func - fields can be set to NULL - if they are not required for the filter being registered. -

    - H5Zregister accepts a single parameter, - the filter_class data structure, - which is defined as follows: -

    -       typedef struct H5Z_class_t {
    -           H5Z_filter_t filter_id;
    -           const char  *comment;
    -           H5Z_can_apply_func_t can_apply_func;
    -           H5Z_set_local_func_t set_local_func;
    -           H5Z_func_t filter_func;            
    -       } H5Z_class_t;
    -      
    - -

    - filter_id is the identifier for the new filter. - This is a user-defined value between - H5Z_FILTER_RESERVED and H5Z_FILTER_MAX, - both of which are defined in the HDF5 source file - H5Zpublic.h. -

    - comment is used for debugging, - may contain a descriptive name for the filter, - and may be the null pointer. -

    - can_apply_func, described in detail below, - is a user-defined callback function which determines whether - the combination of the dataset creation property list values, - the datatype, and the dataspace represent a valid combination - to apply this filter to. -

    - set_local_func, described in detail below, - is a user-defined callback function which sets any parameters that - are specific to this dataset, based on the combination of the - dataset creation property list values, the datatype, and the - dataspace. -

    - filter_func, described in detail below, - is a user-defined callback function which performs the action - of the filter. -

    - The statistics associated with a filter are not reset - by this function; they accumulate over the life of the library. - -

    - The callback functions -
    - Before H5Zregister can link a filter into an - application, three callback functions must be defined - as described in the HDF5 Library header file H5Zpublic.h. - -

    - The can apply callback function is defined as follows:
    -

    - typedef herr_t (*H5Z_can_apply_func_t) - (hid_t dcpl_id, - hid_t type_id, - hid_t space_id) - -

    - Before a dataset is created, the can apply callbacks for - any filters used in the dataset creation property list are called - with the dataset's dataset creation property list, dcpl_id, - the dataset's datatype, type_id, and - a dataspace describing a chunk, space_id, - (for chunked dataset storage). -

    - This callback must determine whether the combination of the - dataset creation property list settings, the datatype, and the - dataspace represent a valid combination to which to apply this filter. - For example, an invalid combination may involve - the filter not operating correctly on certain datatypes, - on certain datatype sizes, or on certain sizes of the chunk dataspace. -

    - This callback can be the NULL pointer, in which case - the library will assume that the filter can be applied to a dataset with - any combination of dataset creation property list values, datatypes, - and dataspaces. -

    - The can apply callback function must return - a positive value for a valid combination, - zero for an invalid combination, and - a negative value for an error. - -

    - The set local callback function is defined as follows:
    -

    - typedef herr_t (*H5Z_set_local_func_t) - (hid_t dcpl_id, - hid_t type_id, - hid_t space_id) - -

    - After the can apply callbacks are checked for a new dataset, - the set local callback functions for any filters used in the - dataset creation property list are called. - These callbacks receive - dcpl_id, the dataset's private copy of the dataset - creation property list passed in to H5Dcreate - (i.e. not the actual property list passed in to H5Dcreate); - type_id, the datatype identifier passed in to - H5Dcreate, - which is not copied and should not be modified; and - space_id, a dataspace describing the chunk - (for chunked dataset storage), which should also not be modified. -

    - The set local callback must set any filter parameters that are - specific to this dataset, based on the combination of the - dataset creation property list values, the datatype, and the dataspace. - For example, some filters perform different actions based on - different datatypes, datatype sizes, numbers of dimensions, - or dataspace sizes. -

    - The set local callback may be the NULL pointer, - in which case, the library will assume that there are - no dataset-specific settings for this filter. -

    - The set local callback function must return - a non-negative value on success and - a negative value for an error. - -

    - The filter operation callback function, - defining the filter's operation on the data, is defined as follows: -

    - typedef size_t (*H5Z_func_t) - (unsigned int flags, - size_t cd_nelmts, - const unsigned int cd_values[], - size_t nbytes, - size_t *buf_size, - void **buf) - - -

    - The parameters flags, cd_nelmts, - and cd_values are the same as for the function - H5Pset_filter. - The one exception is that an additional flag, - H5Z_FLAG_REVERSE, is set when - the filter is called as part of the input pipeline. -

    - The parameter *buf points to the input buffer - which has a size of *buf_size bytes, - nbytes of which are valid data. -

    - The filter should perform the transformation in place if - possible. If the transformation cannot be done in place, - then the filter should allocate a new buffer with - malloc() and assign it to *buf, - assigning the allocated size of that buffer to - *buf_size. - The old buffer should be freed by calling free(). -

    - If successful, the filter operation callback function - returns the number of valid bytes of data contained in *buf. - In the case of failure, the return value is 0 (zero) - and all pointer arguments are left unchanged. -

    Note: -
    The H5Zregister interface is substantially revised - from the HDF5 Release 1.4.x series. - The H5Z_class_t struct and - the set local and can apply callback functions - first appeared in HDF5 Release 1.6. -
    Parameters: -
      - - - -
      const H5Z_class_t filter_class    IN: Struct containing filter-definition information.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: -
    None. - - - -
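    For illustration, a skeleton filter with the H5Z_func_t signature described above, plus a commented-out registration following the H5Z_class_t layout shown earlier; the filter identifier 305 and the filter name are hypothetical, and the optional callbacks are left NULL:

#include <hdf5.h>

#define MY_FILTER_ID 305    /* user-defined id above H5Z_FILTER_RESERVED */

/* Pass-through filter: returns the data unchanged. */
static size_t
my_filter(unsigned int flags, size_t cd_nelmts,
          const unsigned int cd_values[], size_t nbytes,
          size_t *buf_size, void **buf)
{
    if (flags & H5Z_FLAG_REVERSE) {
        /* reading: undo the transformation here */
    } else {
        /* writing: apply the transformation here */
    }
    return nbytes;    /* valid bytes now in *buf; 0 would signal failure */
}

/* Registration, per the struct definition above:
 *   H5Z_class_t cls = { MY_FILTER_ID, "my filter", NULL, NULL, my_filter };
 *   herr_t status = H5Zregister(cls);
 */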
    - - - -
    -
    -
    Name: H5Zunregister -
    Signature: -
    herr_t H5Zunregister(H5Z_filter_t filter) -
    Purpose: -
    Unregisters a filter. -
    Description: -
    H5Zunregister unregisters the filter - specified in filter.   -

    - After a call to H5Zunregister, the filter - specified in filter will no longer be - available to the application. -

    Parameters: -
      - - - -
      H5Z_filter_t filter    IN: Identifier of the filter to be unregistered. - See the introduction to this section of the reference manual - for a list of identifiers for standard filters - distributed with the HDF5 Library.
    -
    Returns: -
    Returns a non-negative value if successful; - otherwise returns a negative value. -
    Fortran90 Interface: h5zunregister_f -
    -
    -SUBROUTINE h5zunregister_f(filter, hdferr) 
    -  IMPLICIT NONE
    -  INTEGER, INTENT(IN)  :: filter  ! Filter; one of the possible values:
    -                                  !    H5Z_FILTER_DEFLATE_F
    -                                  !    H5Z_FILTER_SHUFFLE_F
    -                                  !    H5Z_FILTER_FLETCHER32_F
    -                                  !    H5Z_FILTER_SZIP_F
    -  INTEGER, INTENT(OUT) :: hdferr  ! Error code
    -                                  ! 0 on success, and -1 on failure
    -END SUBROUTINE h5zunregister_f
    -	
    - - -
    - - - -
    -
    - - - -
diff --git a/doc/html/References.html b/doc/html/References.html
deleted file mode 100644
index 766b92c..0000000
--- a/doc/html/References.html
+++ /dev/null
@@ -1,651 +0,0 @@
    -Reference (H5R) and Identifier Interfaces (H5I)
    -
    - - - -
    -

    The Reference Interface (H5R) and
    the Identifier Interface (H5I)

    - -

    1. Introduction

    - -This document discusses the kinds of references implemented -(and planned) in HDF5 and the functions implemented (and planned) -to support them. - - -

    2. References

    - -This section contains an overview of the kinds of references -implemented, or planned for implementation, in HDF5. - -

    - -

    -
    Object reference -
    Reference to an entire object in the current HDF5 file. -

    - An object reference points to an entire object in the - current HDF5 file by storing the relative file address - (OID) of the object header for the object pointed to. - The relative file address of an object header is - constant for the life of the object. - An object reference is of a fixed size in the file. -

    -

    Dataset region reference -
    Reference to a specific dataset region. -

    - A dataset region reference points to a region of a - dataset in the current HDF5 file by storing the OID - of the dataset and the global heap offset of the - region referenced. The region referenced is located - by retrieving the coordinates of the areas in the - region from the global heap. A dataset region - reference is of a variable size in the file. -

    - - -Note: All references are treated as soft links for the -purposes of reference counting. The library does not keep track of -reference links and they may dangle if the object they refer to -is deleted, moved, or not yet available. - - -

    3. Reference Types

    - -Valid HDF5 reference types for use in the H5R functions -are as follows: - -
    - - - - - - - - - -
    Reference Type        Value    Description
    H5R_OBJECT            0        Object reference
    H5R_DATASET_REGION    1        Dataset region reference
    -
    - - -

    4. Functions

    - -Five functions, four in the H5R interface and one in the -H5I interface, have been implemented to support references. -The H5I function is also useful outside the context of references. -

    -

    -
    herr_t H5Rcreate(void *reference, - hid_t loc_id, - const char *name, - H5R_type_t type, - hid_t space_id) -
H5Rcreate creates a reference of a particular type (specified with the type parameter) to the object named by name under loc_id and/or to the dataspace region specified with the space_id parameter. For dataset region references, the selection specified in the dataspace is the portion of the dataset which will be referred to. -

    - -

    hid_t H5Rdereference(hid_t dset, - H5R_type_t rtype, - void *reference) -
    H5Rdereference opens the object referenced - and returns an identifier for that object. - The parameter reference specifies a reference of - type rtype that is stored in the dataset - dset. -

    - -

    int H5Rget_object_type(hid_t obj_id, - void *reference) -
    H5Rget_object_type retrieves the type of object - that an object reference points to. - The parameter obj_id specifies the dataset - containing the reference object or the location identifier - of the object that the dataset is located within. - The parameter reference specifies the - reference being queried. -

    - -

hid_t H5Rget_region(hid_t dataset, H5R_type_t type, void *reference) -
H5Rget_region creates a copy of the dataspace of the dataset that is pointed to and defines a selection in the copy which is the location (or region) pointed to. The parameter reference specifies a reference of type type that is stored in the dataset dataset. -

    - -

    H5I_type_t H5Iget_type(hid_t id) -
    Returns the type of object referred to by the - identifier id. Valid return values appear - in the following list: -
    - - - - - - - - - - - - - -
    H5I_FILEFile objects
    H5I_GROUPGroup objects
    H5I_DATATYPEDatatype objects
    H5I_DATASPACEDataspace objects
    H5I_DATASETDataset objects
    H5I_ATTRAttribute objects
    -
    -

    - This function was inspired by the need of users to figure - out which type of object closing function - (H5Dclose, H5Gclose, etc.) - to call after a call to H5Rdereference, - but it is also of general use. -

    -
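For illustration only, here is a minimal sketch of that pattern; the names ref_dset (an open dataset holding object references) and obj_ref (one reference read from it) are hypothetical:

    hid_t obj;

    /* Open whatever the reference points to */
    if ((obj = H5Rdereference(ref_dset, H5R_OBJECT, &obj_ref)) >= 0) {
        /* Ask the identifier interface what kind of object came back,
         * then call the matching close function. */
        switch (H5Iget_type(obj)) {
            case H5I_GROUP:    H5Gclose(obj); break;
            case H5I_DATASET:  H5Dclose(obj); break;
            case H5I_DATATYPE: H5Tclose(obj); break;
            default:           break;  /* other object types not expected here */
        }
    }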

    - - - -

    5. Examples

    - -Object Reference Writing Example -
    -Create a dataset which has links to other datasets as part -of its raw data and write the dataset to the file. -

    - -

    -{
    -    hid_t file1;
    -    hid_t dataset1;
    -    hid_t datatype, dataspace;
    -    char buf[128];
    -    hobj_ref_t link;
    -    hobj_ref_t data[10][10];
    -    int rank;
-    hsize_t dimsf[2];
    -    int i, j;
    -
    -    /* Open the file */
    -    file1=H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    -
    -    /* Describe the size of the array and create the data space */
    -    rank=2;
    -    dimsf[0] = 10;
    -    dimsf[1] = 10;
    -    dataspace = H5Screate_simple(rank, dimsf, NULL); 
    -
    -    /* Define datatype */
    -    datatype = H5Tcopy(H5T_STD_REF_OBJ);
    -
    -    /* Create a dataset */
    -    dataset1=H5Dcreate(file1,"Dataset One",datatype,dataspace,H5P_DEFAULT);
    -
    -    /* Construct array of OIDs for other datasets in the file */
    -    /* somewhat hokey and artificial, but demonstrates the point */
    -    for(i=0; i<10; i++)
    -        for(j=0; j<10; j++)
    -          {
    -            sprintf(buf,"/Group/Linked Set %d-%d",i,j);
-            if(H5Rcreate(&link,file1,buf,H5R_OBJECT,-1)>=0)
    -                data[i][j]=link;
    -          } /* end for */
    -
    -    /* Write the data to the dataset using default transfer properties.  */
-    H5Dwrite(dataset1, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    -
    -    /* Close everything */
    -    H5Sclose(dataspace);
    -    H5Tclose(datatype);
    -    H5Dclose(dataset1);
    -    H5Fclose(file1);
    -}
    -
    - - -Object Reference Reading Example -
    -Open a dataset which has links to other datasets as part of -its raw data and read in those links. -

    - -

    -{
    -    hid_t file1;
    -    hid_t dataset1, tmp_dset;
-    hobj_ref_t data[10][10];
    -    int i, j;
    -
    -    /* Open the file */
    -    file1=H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    -
    -    /* Open the dataset */
    -    dataset1=H5Dopen(file1,"Dataset One",H5P_DEFAULT);
    -
    -    /* 
    -     * Read the data to the dataset using default transfer properties.
    -     * (we are assuming the dataset is the same and not querying the
    -     *  dimensions, etc.)
    -     */
-    H5Dread(dataset1, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    -
    -    /* Analyze array of OIDs of linked datasets in the file */
    -    /* somewhat hokey and artificial, but demonstrates the point */
    -    for(i=0; i<10; i++)
-        for(j=0; j<10; j++)
    -          {
-            if((tmp_dset=H5Rdereference(dataset1, H5R_OBJECT, &data[i][j]))>=0)
    -              {
    -                  
    -              } /* end if */
    -            H5Dclose(tmp_dset);
    -          } /* end for */
    -
    -
    -    /* Close everything */
    -    H5Dclose(dataset1);
    -    H5Fclose(file1);
    -}
    -
    - - -Dataset Region Reference Writing Example -
    -Create a dataset which has links to other dataset regions -(single elements in this case) as part of its raw data and -write the dataset to the file. -

    - -

    -{
    -    hid_t file1;
    -    hid_t dataset1, dataset2;
    -    hid_t datatype, dataspace1, dataspace2;
    -    char buf[128];
-    hdset_reg_ref_t data[10][10];     /* HDF5 dataset region reference type */
    -    int rank;
-    hsize_t dimsf[2];
    -    hsize_t start[3],count[3];
    -    int i, j;
    -
    -    /* Open the file */
    -    file1=H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    -
    -    /* Describe the size of the array and create the data space */
    -    rank=2;
    -    dimsf[0] = 10;
    -    dimsf[1] = 10;
    -    dataspace1 = H5Screate_simple(rank, dimsf, NULL); 
    -
    -    /* Define Dataset Region Reference datatype */
    -    datatype = H5Tcopy(H5T_STD_REF_DATAREG);
    -
    -    /* Create a dataset */
    -    dataset1=H5Dcreate(file1,"Dataset One",datatype,dataspace1,H5P_DEFAULT);
    -
    -    /* Construct array of OIDs for other datasets in the file */
    -    /* (somewhat artificial, but demonstrates the point) */
    -    for(i=0; i<10; i++)
-        for(j=0; j<10; j++)
    -          {
    -            sprintf(buf,"/Group/Linked Set %d-%d",i,j);
    -            
    -            /* Get the dataspace for the object to point to */
    -            dataset2=H5Dopen(file1,buf,H5P_DEFAULT);
-            dataspace2=H5Dget_space(dataset2);
    -
    -            /* Select the region to point to */
    -            /* (could be different region for each pointer) */
    -            start[0]=5; start[1]=4; start[2]=3;
    -            count[0]=2; count[1]=4; count[2]=1;
    -            H5Sselect_hyperslab(dataspace2,H5S_SELECT_SET,start,NULL,count,NULL);
    -
-            /* Create the reference and store it directly in the data array */
-            H5Rcreate(&data[i][j],file1,buf,H5R_DATASET_REGION,dataspace2);
    -
    -            H5Sclose(dataspace2);
-            H5Dclose(dataset2);
    -          } /* end for */
    -
    -    /* Write the data to the dataset using default transfer properties.  */
-    H5Dwrite(dataset1, H5T_STD_REF_DATAREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    -
    -    /* Close everything */
-    H5Sclose(dataspace1);
    -    H5Tclose(datatype);
    -    H5Dclose(dataset1);
    -    H5Fclose(file1);
    -}
    -
    - - -Dataset Region Reference Reading Example -
    -Open a dataset which has links to other datasets regions -(single elements in this case) as part of its raw data and -read in those links. -

    - -

    -{
    -    hid_t file1;
    -    hid_t dataset1, tmp_dset;
    -    hid_t dataspace;
-    hdset_reg_ref_t data[10][10];     /* HDF5 dataset region reference type */
    -    int i, j;
    -
    -    /* Open the file */
    -    file1=H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    -
    -    /* Open the dataset */
    -    dataset1=H5Dopen(file1,"Dataset One",H5P_DEFAULT);
    -
    -    /* 
    -     * Read the data to the dataset using default transfer properties.
    -     * (we are assuming the dataset is the same and not querying the
    -     *  dimensions, etc.)
    -     */
-    H5Dread(dataset1, H5T_STD_REF_DATAREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    -
    -    /* Analyze array of OIDs of linked datasets in the file */
    -    /* (somewhat artificial, but demonstrates the point) */
    -    for(i=0; i<10; i++)
-        for(j=0; j<10; j++)
    -          {
-            if((tmp_dset=H5Rdereference(dataset1, H5R_DATASET_REGION, &data[i][j]))>=0)
    -              {
    -                  /* Get the dataspace with the pointed to region selected */
-                  dataspace=H5Rget_region(dataset1, H5R_DATASET_REGION, &data[i][j]);
    -
    -                  
    -
    -                  H5Sclose(dataspace);
    -              } /* end if */
    -            H5Dclose(tmp_dset);
    -          } /* end for */
    -
    -
    -    /* Close everything */
    -    H5Dclose(dataset1);
    -    H5Fclose(file1);
    -}
    -
    - - - -
    -
    - - - -
    - HDF5 documents and links 
    - Introduction to HDF5 
    - HDF5 Reference Manual 
    - HDF5 User's Guide for Release 1.6 
    - -
    - And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
    - Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
    - References   - Attributes   - Property Lists   - Error Handling   -
    - Filters   - Caching   - Chunking   - Mounting Files   -
    - Performance   - Debugging   - Environment   - DDL   -
    -
    -
    -
    -HDF Help Desk -
    -Describes HDF5 Release 1.4.5, February 2003 -
    - -Last modified: 2 August 2004 - - - diff --git a/doc/html/TechNotes.html b/doc/html/TechNotes.html deleted file mode 100644 index 28825ef..0000000 --- a/doc/html/TechNotes.html +++ /dev/null @@ -1,319 +0,0 @@ - - - - - -HDF5 Technical Notes - - - - - - - - - - - -
    -
    - - - - -
    -HDF5 documents and links 
    -Introduction to HDF5 
    - -
    -HDF5 User's Guide 
    -HDF5 Application Developer's Guide 
    -HDF5 Reference Manual 
    - - - -
    -
    -
    -
    -
    - -
    -

    HDF5 Technical Notes

    -

    Technical Notes for HDF5 Library and Driver Developers

    -
    - - -
    - This informal volume of technical notes is of interest to - those who develop and maintain the HDF5 library and - related, closely-coupled drivers. - These notes are not generally of interest to applications developers - and certainly not of interest to users. - (Some of these documents may be somewhat out of date as they were - working papers for the design process.) -
    -
    -
    - Memory Management -   - A discussion of memory management issues in HDF5 -
    - Memory Management and -
         - Free Lists -
         - Notes regarding the implementation of free lists and memory management -
Heap Management -   - A discussion of the H5H heap management functions -
    Raw Data Storage -   - A discussion of the storage of raw HDF5 data -
    Virtual File Layer -   - A description of the HDF5 virtual file layer (VFL), - a public API for the implementation of custom I/O drivers -
    List of VFL Functions -   - A list of the VFL functions, H5FD* -
    I/O Pipeline -   - A description of the raw data I/O pipeline -
    - Large Datasets on Small -
         - Machines -
      - A guide to accessing large datasets on small computers -
    - Relocating a File Data -
         - Structure -
      - A discussion of the issues involved in moving file data structures once - they have been cached in memory -
    - Working with External Files -   - A guide to the use of multiple files with HDF5 -
    Object Headers -   - A discussion of the H5O object header functions -
    - Symbol Table Caching Issues -   - A discussion of issues involving caching of object header messages in - symbol table entries -
    - HDF4/HDF5 Compatibility -   - A discussion of compatibility issues between HDF4 and HDF5 -
    - Testing the Chunked Layout -
         - of HDF5 -
      - A white paper discussing the motivation to implement raw data chunking - in the HDF5 library -
    Library Maintenance -   - A discussion of HDF5 library maintenance issues -
    Code Review -   - Code Review 1 and 2 -
    - Release Version Numbers -   - A description of HDF5 release version numbers -
    Naming Schemes -   - A discussion of naming schemes for HDF5 library modules, functions, - datatypes, etc. -
    -Thread Safe HDF5 Library -
         - Implementation -
      - A report on the implementation of a thread safe HDF5 library. -
    -Using HDF5 with OpenMP -   - A short report on using HDF5 with OpenMP. -
    HDF5 Software Controls -   - Descriptions of the HDF5 knobs and controls, such as the - environment variables and settings that control the functionality - of the HDF5 libraries and tools. -
Daily Test Explained -   - An explanation of the daily testing conducted for HDF software. -
    Test Review -   - Results of reviewing tests for API functions. -
    Basic Performance Tools -   - A description of the three basic performance tools (chunk, iopipe, overhead). -
    Variable-Length Datatype Info -   - Description of various aspects of using variable-length datatypes in HDF5. -
    Reserved File Address Space -   - Description of HDF5's internal system for ensuring that files stay within their address space. -
    Data Transform Report -   - Report of the Data Transform implementation. -
    Automake Use Cases -   - Simple explanations of how to make some common changes to HDF5's Automake-generated Makefiles.am. -
    -
    - -
    -
    - - -
    -
    - - - - -
    -HDF5 documents and links 
    -Introduction to HDF5 
    - -
    -HDF5 User's Guide 
    -HDF5 Application Developer's Guide 
    -HDF5 Reference Manual 
    - - - -
    -
    -
    -
    - - - -
    - -
    -HDF Help Desk -
    -Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
    - -Last modified: 15 December 2004 - -
    -Copyright   -
    - - - - diff --git a/doc/html/TechNotes/Automake.html b/doc/html/TechNotes/Automake.html deleted file mode 100644 index c6dfc41..0000000 --- a/doc/html/TechNotes/Automake.html +++ /dev/null @@ -1,223 +0,0 @@ - - - - -An Automake Primer for HDF5 - - -

    An Automake Primer for HDF5

    -

    James Laird - May 2005


    - -

    How to:

    - -

    Change a Makefile

    - -

    Add a source file to an existing program or library

    - -

    Add a simple test

    - -

    Add a test with multiple sources

    - -

    Add a new directory

    - -

    Add a program that is only compiled in parallel

    - -

    Change a program's name when it is compiled in parallel

    - -

    Add a new library

    - -

    Change the library's API

    -
    - -

    Changing a Makefile

    - -

    Suppose you need to make a minor change to the Makefile in the test directory -(hdf5/test/Makefile). You have checked out hdf5 from the CVS repository into -~/scratch/hdf5. You want to build the library in a directory named -~/scratch/build.
    -First, edit the Makefile.am in the source tree. You must make any changes in the Makefile.am, -not the Makefile, since the Makefile is automatically generated.

    - -

    cd ~/scratch/hdf5/test
    -vi Makefile.am

    - -

    Now, go to the root of the source tree and run the reconfigure script, which updates the -source tree. It will create a new Makefile.in in the test directory with your changes.

    - -

    cd ~/scratch/hdf5
    -./bin/reconfigure

    - -

    After running bin/reconfigure, you will want to test your change. Go to -~/scratch/build and run configure.

    - -

    cd ~/scratch/build
    - -../hdf5/configure
    - -make check

    - -

    Configure generates Makefiles from the Makefiles.in in the source tree. The dependencies are:

    - -

    Makefile.am -> (bin/reconfigure) -> Makefile.in -> (configure) -> Makefile

    - -

    Reconfigure should also be used when any change is made to configure.in.

    -
    - -

    Adding a source file to an existing program or library

    - -

    Suppose you want to add the source file h5testfoo.c to the HDF5 test -library in the test directory. You open up test/Makefile.am in your -favorite text editor and scroll down until you see the line:

    - -

    libh5test_la_SOURCES=h5test.c testframe.c

    - -

    Just add h5testfoo.c to the list of sources. You're done!
    -Now run bin/reconfigure to create a new Makefile.in from the Makefile.am you just -edited.

    -
    - -

    Adding a simple test

    - -

    Suppose you want to create a new test executable named newtest with one -source file, newtest.c. You open up test/Makefile.am and find -the line

    - -

    TEST_PROG=testhdf5 lheap ohdr ...

    - -

    Just add newtest to the list of programs. That's it!  Automake will by -default guess that your program newtest has one source file named -newtest.c.
    -Now run bin/reconfigure to update the Makefile.in.

    -
    - -

    Adding a slightly more complicated test

    - -

    Suppose you want to create a new test executable named newertest with -several source files. You open up test/Makefile.am as before and find the line

    - -

    TEST_PROG=testhdf5 lheap ohdr ...

    - -

    Add newertest to the list of programs.
    -Now you need to tell Automake how to build newertest. Add a new line below -TEST_PROG:

    - -

newertest_SOURCES = source1.c source2.c source3.c

    - -

    You don't need to mention header files, as these will be automatically detected.
    -Now run bin/reconfigure to update the Makefile.in.

    -
    - -

    Adding a directory

    - -

    To add the directory for a new tool, h5merge, go to the Makefile.am -in the tools directory (the parent directory of the directory you want to add). -Find the line that reads

    - -

    SUBDIRS=lib h5dump...

    - -

    Add h5merge to this list of subdirectories.
    -Now you probably want to create a Makefile.am in the h5merge directory. A good starting -point for this Makefile.am might be the sample Makefile.am in the config directory -(config/Makefile.am.blank). Alternately, you could copy the Makefile.am -from another directory.
    -Once you have your new Makefile.am in place, edit configure.in in the root -directory. Near the end of the file is a list of files generated by configure. -Add tools/h5merge/Makefile.in to this list.
-Now run bin/reconfigure. This will update configure and generate a Makefile.in in the tools/h5merge directory. Don't forget to add both the Makefile.am and the Makefile.in to CVS, and to update the MANIFEST!

    -
    - -

    Adding a program that is only compiled in parallel

    - -

Suppose you only want to compile a program when HDF5 is configured to run in parallel--for example, a parallel version of h5repack called h5prepack. Open up the h5repack Makefile.am.
    -The simple solution is:

    - -

    if BUILD_PARALLEL_CONDITIONAL
    -   H5PREPACK=h5prepack
    -endif

    - -

    Now the variable $H5PREPACK will be "h5prepack" if parallel is -enabled and "" if parallel is disabled. Add $H5PREPACK to the list of -programs to be built:

    - -

    bin_PROGRAMS=h5repack $(H5PREPACK)

    - -

    Add sources for this program as usual:

    - -

    h5prepack_SOURCES=...

    - -

    Don't forget to run bin/reconfigure when you're done!

    -
    - -

    Changing a program's name when it is compiled in parallel

    - -

    Automake conditionals can be a very powerful tool. Suppose that instead of building -two versions of h5repack during a parallel build, you want to change the name of -the tool depending on whether or not HDF5 is configured to run in parallel--you -want to create either h5repack or h5prepack, but not both.
    -Open up the h5repack Makefile.am and use an automake conditional:

    - -

    if BUILD_PARALLEL_CONDITIONAL
    -   H5REPACK_NAME=h5prepack
    -else
    -   H5REPACK_NAME=h5repack
    -endif
    -bin_PROGRAMS=$(H5REPACK_NAME)

    - -

    Now you only build one program, but the name of that program changes. You still need -to define sources for both h5repack and h5prepack, but you needn't type them out twice if -they are the same:

    - -

    h5repack_SOURCES=...
    -h5prepack_SOURCES=$(h5repack_SOURCES)

    - -

    Don't forget to run bin/reconfigure when you're done!

    -
    - -

    Adding a new library

    - -

    Suppose you want to add a new library to the HDF5 build tree, libfoo. The procedure for -building libraries is very similar to that for building programs:

    - -

    lib_LTLIBRARIES=libfoo.la
    -libfoo_la_SOURCES=sourcefoo.c sourcefootwo.c

    - -

    This library will be installed in the lib directory when a user types -"make install".
    -You might instead be building a convenience library for testing purposes (like -libh5test.la) and not want it to be installed. If this is the case, you -would type

    - -

    check_LTLIBRARIES=libfoo.la
    -instead of
    -lib_LTLIBRARIES=libfoo.la

    - -

    To make it easier for other directories to link to your library, -you might want to assign its path to a variable in all HDF5 Makefiles. You can -make changes to all Makefiles by editing config/commence.am and adding a line -like

    - -

    LIBFOO=$(top_builddir)/foo/src/libfoo.la

    - -

    config/commence.am is textually included in all Makefiles.am when automake -processes them.
    -As always, if you change a Makefile.am or config/commence.am, don't forget to run -bin/reconfigure.

    -
    - -

    Changing HDF5's API

    - -

    If you have added or removed a function from HDF5, or if you have changed a function -signature, you must indicate this by updating the file lt_vers.am located in -the config directory.
    -If you have changed the API at all, increment LT_VERS_INTERFACE and set -LT_VERS_REVISION to zero.
    -If you have added functions but not altered or removed existing ones, also increment -LT_VERS_AGE.
    -If instead you have altered or removed any functions, reset LT_VERS_AGE to -zero.

    - - diff --git a/doc/html/TechNotes/Basic_perform.html b/doc/html/TechNotes/Basic_perform.html deleted file mode 100644 index 2a622fc..0000000 --- a/doc/html/TechNotes/Basic_perform.html +++ /dev/null @@ -1,75 +0,0 @@ - - - - Description of the three basic performance tools - - - -

    Description of the three basic performance tools

    - -

    iopipe

    -

    Times reads and writes to an HDF5 2-d dataset and compares that with - reads and writes using POSIX I/O. Reports seven measurements in - terms of CPU user time, CPU system time, elapsed time, and - bandwidth: - - -

    -
    fill raw: time it takes to memset() a buffer.
    -
    fill hdf5: time it takes to read from a dataset never written
    -
    out raw: time it takes to write using POSIX I/O
    -
    out hdf5: time it takes to write using H5Dwrite()
    -
    in raw: time it takes to read data just written using POSIX I/O
    -
    in hdf5: time it takes to H5Dread() data written with H5Dwrite()
    -
    in hdf5 partial: time it takes to H5Dread() the "center" area.
    -
    - - -

    This is a pretty stupid performance test. It accesses the same area - of file and memory over and over and the file size is way too - small. But it is good at showing how much overhead there is in the - library itself. - - -

    chunk

    -

    Determines how efficient the raw data cache is for various access - patterns of a chunked dataset, both reading and writing. The access - pattern is either (a) we access the entire dataset by moving a window - across and down a 2-d dataset in row-major order a full window - height and width at a time, or (b) we access part of a dataset by moving - the window diagonally from the (0,0) corner to the opposite corner - by half the window height and width at a time. The window is - measured in terms of the chunk size. - - -

    The result is: -
A table written to stdout that contains the window size as a fraction of the chunk size and the efficiency of the cache (i.e., the number of bytes accessed by H5Dread() or H5Dwrite() divided by the number of bytes of the dataset actually read or written by lower layers).

    A gnuplot script and data files which can be displayed by running - gnuplot and typing the command `load "x-gnuplot"'. - - -

    overhead

    -

    Measures the overhead used by the B-tree for indexing chunked - datasets. As data is written to a chunked dataset the B-tree - grows and its nodes get split. When a node splits one of three - ratios are used to determine how many items from the original node - go into the new left and right nodes, and these ratios affect the - total size of the B-tree in a way that depends on the order that - data is written to the dataset. - - -

    Invoke as `overhead usage' for more information. -


    -
    Robb Matzke
    - - -Last modified: Jun 4, 2003 - - - diff --git a/doc/html/TechNotes/BigDataSmMach.html b/doc/html/TechNotes/BigDataSmMach.html deleted file mode 100644 index fe00ff8..0000000 --- a/doc/html/TechNotes/BigDataSmMach.html +++ /dev/null @@ -1,122 +0,0 @@ - - - - Big Datasets on Small Machines - - - -

    Big Datasets on Small Machines

    - -

    1. Introduction

    - -

The HDF5 library is able to handle files larger than the maximum file size representable by the system's off_t type, and datasets larger than the maximum memory size representable by size_t. For instance, a machine where sizeof(off_t) and sizeof(size_t) are both four bytes can handle datasets and files as large as 18x10^18 bytes. However, most Unix systems limit the number of concurrently open files, so a practical file size limit is closer to 512GB or 1TB.

    Two "tricks" must be imployed on these small systems in order - to store large datasets. The first trick circumvents the - off_t file size limit and the second circumvents - the size_t main memory limit. - -

    2. File Size Limits

    - -

    Systems that have 64-bit file addresses will be able to access - those files automatically. One should see the following output - from configure: - -

    -checking size of off_t... 8
    -    
    - -

    Also, some 32-bit operating systems have special file systems - that can support large (>2GB) files and HDF5 will detect - these and use them automatically. If this is the case, the - output from configure will show: - -

    -checking for lseek64... yes
    -checking for fseek64... yes
    -    
    - -

    Otherwise one must use an HDF5 file family. Such a family is - created by setting file family properties in a file access - property list and then supplying a file name that includes a - printf-style integer format. For instance: - -

    -hid_t plist, file;
    -plist = H5Pcreate (H5P_FILE_ACCESS);
    -H5Pset_family (plist, 1<<30, H5P_DEFAULT);
    -file = H5Fcreate ("big%03d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    -    
    - -

    The second argument (1<<30) to - H5Pset_family() indicates that the family members - are to be 2^30 bytes (1GB) each although we could have used any - reasonably large value. In general, family members cannot be - 2GB because writes to byte number 2,147,483,647 will fail, so - the largest safe value for a family member is 2,147,483,647. - HDF5 will create family members on demand as the HDF5 address - space increases, but since most Unix systems limit the number of - concurrently open files the effective maximum size of the HDF5 - address space will be limited (the system on which this was - developed allows 1024 open files, so if each family member is - approx 2GB then the largest HDF5 file is approx 2TB). - -

    If the effective HDF5 address space is limited then one may be - able to store datasets as external datasets each spanning - multiple files of any length since HDF5 opens external dataset - files one at a time. To arrange storage for a 5TB dataset split - among 1GB files one could say: - -

    -hid_t plist = H5Pcreate (H5P_DATASET_CREATE);
    -for (i=0; i<5*1024; i++) {
    -   sprintf (name, "velocity-%04d.raw", i);
    -   H5Pset_external (plist, name, 0, (size_t)1<<30);
    -}
    -    
    - -

    3. Dataset Size Limits

    - -

    The second limit which must be overcome is that of - sizeof(size_t). HDF5 defines a data type called - hsize_t which is used for sizes of datasets and is, - by default, defined as unsigned long long. - -

    To create a dataset with 8*2^30 4-byte integers for a total of - 32GB one first creates the dataspace. We give two examples - here: a 4-dimensional dataset whose dimension sizes are smaller - than the maximum value of a size_t, and a - 1-dimensional dataset whose dimension size is too large to fit - in a size_t. - -

    -hsize_t size1[4] = {8, 1024, 1024, 1024};
    -hid_t space1 = H5Screate_simple (4, size1, size1);
    -
    -hsize_t size2[1] = {8589934592LL};
-hid_t space2 = H5Screate_simple (1, size2, size2);
    -    
    - -

    However, the LL suffix is not portable, so it may - be better to replace the number with - (hsize_t)8*1024*1024*1024. - -
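For example, the second dataspace above could be created portably as:

    hsize_t size2[1] = {(hsize_t)8*1024*1024*1024};
    hid_t space2 = H5Screate_simple (1, size2, size2);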

    For compilers that don't support long long large - datasets will not be possible. The library performs too much - arithmetic on hsize_t types to make the use of a - struct feasible. - -


    -
    Robb Matzke
    - - -Last modified: Sun Jul 19 11:37:25 EDT 1998 - - - diff --git a/doc/html/TechNotes/ChStudy_1000x1000.gif b/doc/html/TechNotes/ChStudy_1000x1000.gif deleted file mode 100644 index b7d5a83..0000000 Binary files a/doc/html/TechNotes/ChStudy_1000x1000.gif and /dev/null differ diff --git a/doc/html/TechNotes/ChStudy_250x250.gif b/doc/html/TechNotes/ChStudy_250x250.gif deleted file mode 100644 index fe35f39..0000000 Binary files a/doc/html/TechNotes/ChStudy_250x250.gif and /dev/null differ diff --git a/doc/html/TechNotes/ChStudy_499x499.gif b/doc/html/TechNotes/ChStudy_499x499.gif deleted file mode 100644 index 0d2038b..0000000 Binary files a/doc/html/TechNotes/ChStudy_499x499.gif and /dev/null differ diff --git a/doc/html/TechNotes/ChStudy_5000x1000.gif b/doc/html/TechNotes/ChStudy_5000x1000.gif deleted file mode 100644 index 0f3c290..0000000 Binary files a/doc/html/TechNotes/ChStudy_5000x1000.gif and /dev/null differ diff --git a/doc/html/TechNotes/ChStudy_500x500.gif b/doc/html/TechNotes/ChStudy_500x500.gif deleted file mode 100644 index 38dd7d6..0000000 Binary files a/doc/html/TechNotes/ChStudy_500x500.gif and /dev/null differ diff --git a/doc/html/TechNotes/ChStudy_p1.gif b/doc/html/TechNotes/ChStudy_p1.gif deleted file mode 100644 index 938d133..0000000 Binary files a/doc/html/TechNotes/ChStudy_p1.gif and /dev/null differ diff --git a/doc/html/TechNotes/ChStudy_p1.obj b/doc/html/TechNotes/ChStudy_p1.obj deleted file mode 100644 index 6fbf583..0000000 --- a/doc/html/TechNotes/ChStudy_p1.obj +++ /dev/null @@ -1,113 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,16,1,9,1,1,0,0,3,7,1,1,'Helvetica',0,24,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,384,384,0,1,1,22,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,64,128,384],0,1,1,23,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 192,64,192,384],0,1,1,24,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 256,64,256,384],0,1,1,25,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 320,64,320,384],0,1,1,26,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 64,128,384,128],0,1,1,27,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 64,192,384,192],0,1,1,28,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 64,256,384,256],0,1,1,29,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 64,320,384,320],0,1,1,30,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',96,80,'Courier',0,17,1,1,0,1,7,14,37,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1"]). -text('black',160,80,'Courier',0,17,1,1,0,1,7,14,39,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "2"]). -text('black',224,80,'Courier',0,17,1,1,0,1,7,14,41,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "3"]). -text('black',288,80,'Courier',0,17,1,1,0,1,7,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "4"]). -text('black',352,80,'Courier',0,17,1,1,0,1,7,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "5"]). -text('black',96,144,'Courier',0,17,1,1,0,1,7,14,51,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "6"]). -text('black',160,144,'Courier',0,17,1,1,0,1,7,14,53,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "7"]). -text('black',224,144,'Courier',0,17,1,1,0,1,7,14,55,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "8"]). -text('black',288,144,'Courier',0,17,1,1,0,1,7,14,57,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "9"]). -text('black',352,144,'Courier',0,17,1,1,0,1,14,14,59,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "10"]). 
-text('black',96,208,'Courier',0,17,1,1,0,1,14,14,61,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "11"]). -text('black',160,208,'Courier',0,17,1,1,0,1,14,14,63,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "12"]). -text('black',224,208,'Courier',0,17,1,1,0,1,14,14,65,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "13"]). -text('black',288,208,'Courier',0,17,1,1,0,1,14,14,67,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "14"]). -text('black',352,208,'Courier',0,17,1,1,0,1,14,14,71,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "15"]). -text('black',96,272,'Courier',0,17,1,1,0,1,14,14,75,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "16"]). -text('black',160,272,'Courier',0,17,1,1,0,1,14,14,77,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "17"]). -text('black',224,272,'Courier',0,17,1,1,0,1,14,14,79,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "18"]). -text('black',288,272,'Courier',0,17,1,1,0,1,14,14,81,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "19"]). -text('black',352,272,'Courier',0,17,1,1,0,1,14,14,83,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "20"]). -text('black',96,336,'Courier',0,17,1,1,0,1,14,14,87,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "21"]). -text('black',160,336,'Courier',0,17,1,1,0,1,14,14,89,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "22"]). -text('black',224,336,'Courier',0,17,1,1,0,1,14,14,91,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "23"]). -text('black',288,336,'Courier',0,17,1,1,0,1,14,14,93,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "24"]). -text('black',352,336,'Courier',0,17,1,1,0,1,14,14,95,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "25"]). -poly('black',2,[ - 416,64,416,384],3,1,1,100,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 64,416,384,416],3,1,1,101,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',390,228,'Courier',0,17,1,0,0,1,14,35,102,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,1,0,[ - 390,228,390,228,425,242,0,-1000,1000,0,34,18,389,227,426,243],[ - "5,000"]). -text('black',224,432,'Courier',0,17,1,1,0,1,35,14,116,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "5,000"]). -text('black',160,512,'Courier',0,17,1,0,0,1,105,14,131,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "= 1,000 x 1,000"]). -box('black',80,480,144,544,7,1,1,134,0,0,0,0,0,'1',[ -]). -text('black',224,16,'Helvetica',0,24,1,1,0,1,296,29,144,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Order that data was written"]). -box('black',32,0,464,576,0,1,1,149,0,0,0,0,0,'1',[ -]). diff --git a/doc/html/TechNotes/ChunkingStudy.html b/doc/html/TechNotes/ChunkingStudy.html deleted file mode 100644 index 776b8fe..0000000 --- a/doc/html/TechNotes/ChunkingStudy.html +++ /dev/null @@ -1,190 +0,0 @@ - - - - Testing the chunked layout of HDF5 - - - - - -This document is of interest primarily for its discussion of the -HDF team's motivation for implementing raw data caching. -At a more abstract level, the discussion of the principles of -data chunking is also of interest, but a more recent discussion -of that topic can be found in -Dataset Chunking Issues in the -HDF5 User's Guide. - -The performance study described here predates the current chunking -implementation in the HDF5 library, so the particular performance data -is no longer apropos. -     -- the Editor - - -

    Testing the chunked layout of HDF5

    - -

    This is the results of studying the chunked layout policy in - HDF5. A 1000 by 1000 array of integers was written to a file - dataset extending the dataset with each write to create, in the - end, a 5000 by 5000 array of 4-byte integers for a total data - storage size of 100 million bytes. - -

    -

    - Order that data was written -
    Fig 1: Write-order of Output Blocks -
    - -

After the array was written, it was read back in blocks that were 500 by 500 elements in row-major order (that is, the top-left quadrant of output block one, then the top-right quadrant of output block one, then the top-left quadrant of output block 2, etc.).

    I tried to answer two questions: -

      -
    • How does the storage overhead change as the chunk size - changes? -
    • What does the disk seek pattern look like as the chunk size - changes? -
    - -

    I started with chunk sizes that were multiples of the read - block size or k*(500, 500). - -

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Table 1: Total File Overhead -
    Chunk Size (elements)Meta Data Overhead (ppm)Raw Data Overhead (ppm)
    500 by 50085.840.00
    1000 by 100023.080.00
    5000 by 100023.080.00
    250 by 250253.300.00
    499 by 49985.84205164.84
    -
    - -
    -

    -

    - 500x500 -
    Fig 2: Chunk size is 500x500 -
    - -

    The first half of Figure 2 shows output to the file while the - second half shows input. Each dot represents a file-level I/O - request and the lines that connect the dots are for visual - clarity. The size of the request is not indicated in the - graph. The output block size is four times the chunk size which - results in four file-level write requests per block for a total - of 100 requests. Since file space for the chunks was allocated - in output order, and the input block size is 1/4 the output - block size, the input shows a staircase effect. Each input - request results in one file-level read request. The downward - spike at about the 60-millionth byte is probably the result of a - cache miss for the B-tree and the downward spike at the end is - probably a cache flush or file boot block update. - -


    -

    -

    - 1000x1000 -
Fig 3: Chunk size is 1000x1000 -
    - -

In this test I increased the chunk size to match the output block size and one can see from the first half of the graph that 25 file-level write requests were issued, one for each output block. The read half of the test shows that four times the amount of data was read as written. This results from the fact that HDF5 must read the entire chunk for any request that falls within that chunk, which is done because (1) if the data is compressed the entire chunk must be decompressed, and (2) the library assumes that a chunk size was chosen to optimize disk performance.


    -

    -

    - 5000x1000 -
Fig 4: Chunk size is 5000x1000 -
    - -

    Increasing the chunk size further results in even worse - performance since both the read and write halves of the test are - re-reading and re-writing vast amounts of data. This proves - that one should be careful that chunk sizes are not much larger - than the typical partial I/O request. - -


    -

    -

    - 250x250 -
Fig 5: Chunk size is 250x250 -
    - -

If the chunk size is decreased then the amount of data transferred between the disk and library is optimal for no caching, but the amount of meta data required to describe the chunk locations increases to 250 parts per million. One can also see that the final downward spike contains more file-level write requests as the meta data is flushed to disk just before the file is closed.


    -

    -

    - 499x499 -
Fig 6: Chunk size is 499x499 -
    - -

This test shows the result of choosing a chunk size which is close to the I/O block size. Because the total size of the array isn't a multiple of the chunk size, the library allocates an extra zone of chunks around the top and right edges of the array which are only partially filled. This results in 20,516,484 extra bytes of storage, a 20% increase in the total raw data storage size. But the amount of meta data overhead is the same as for the 500 by 500 test. In addition, the mismatch causes entire chunks to be read in order to update a few elements along the edge of the chunk, which results in a 3.6-fold increase in the amount of data transferred.


    -
    HDF Help Desk
    - - -Last modified: 30 Jan 1998 (technical content) -
    -Last modified: 9 May 2000 (editor's note) - - - diff --git a/doc/html/TechNotes/CodeReview.html b/doc/html/TechNotes/CodeReview.html deleted file mode 100644 index 213cbbe..0000000 --- a/doc/html/TechNotes/CodeReview.html +++ /dev/null @@ -1,300 +0,0 @@ - - - - Code Review - - -

    Code Review 1

    - -

    Some background...

    -

This is one of the functions exported from the H5B.c file that implements a B-link-tree class without worrying about concurrency yet (thus the `Note:' in the function prologue). The H5B.c file provides the basic machinery for operating on generic B-trees, but it isn't much use by itself. Various subclasses of the B-tree (like symbol tables or indirect storage) provide their own interface and back end to this function. For instance, H5G_stab_find() takes a symbol table OID and a name and calls H5B_find() with an appropriate udata argument that eventually gets passed down to the subclass's own callbacks (such as the found function used below).

    - 1 /*-------------------------------------------------------------------------
    - 2  * Function:    H5B_find
    - 3  *
    - 4  * Purpose:     Locate the specified information in a B-tree and return
    - 5  *              that information by filling in fields of the caller-supplied
    - 6  *              UDATA pointer depending on the type of leaf node
    - 7  *              requested.  The UDATA can point to additional data passed
    - 8  *              to the key comparison function.
    - 9  *
    -10  * Note:        This function does not follow the left/right sibling
    -11  *              pointers since it assumes that all nodes can be reached
    -12  *              from the parent node.
    -13  *
    -14  * Return:      Success:        SUCCEED if found, values returned through the
    -15  *                              UDATA argument.
    -16  *
    -17  *              Failure:        FAIL if not found, UDATA is undefined.
    -18  *
    -19  * Programmer:  Robb Matzke
    -20  *              matzke@llnl.gov
    -21  *              Jun 23 1997
    -22  *
    -23  * Modifications:
    -24  *
    -25  *-------------------------------------------------------------------------
    -26  */
    -27 herr_t
    -28 H5B_find (H5F_t *f, const H5B_class_t *type, const haddr_t *addr, void *udata)
    -29 {
    -30    H5B_t        *bt=NULL;
    -31    intn         idx=-1, lt=0, rt, cmp=1;
    -32    int          ret_value = FAIL;
    -    
    - -

    All pointer arguments are initialized when defined. I don't - worry much about non-pointers because it's usually obvious when - the value isn't initialized. - -

    -33 
    -34    FUNC_ENTER (H5B_find, NULL, FAIL);
    -35 
    -36    /*
    -37     * Check arguments.
    -38     */
    -39    assert (f);
    -40    assert (type);
    -41    assert (type->decode);
    -42    assert (type->cmp3);
    -43    assert (type->found);
    -44    assert (addr && H5F_addr_defined (addr));
    -    
    - -

    I use assert to check invariant conditions. At - this level of the library, none of these assertions should fail - unless something is majorly wrong. The arguments should have - already been checked by higher layers. It also provides - documentation about what arguments might be optional. - -

    -45    
    -46    /*
    -47     * Perform a binary search to locate the child which contains
    -48     * the thing for which we're searching.
    -49     */
    -50    if (NULL==(bt=H5AC_protect (f, H5AC_BT, addr, type, udata))) {
    -51       HGOTO_ERROR (H5E_BTREE, H5E_CANTLOAD, FAIL);
    -52    }
    -    
    - -

You'll see this quite often in the low-level stuff and it's documented in the H5AC.c file. The H5AC_protect ensures that the B-tree node (which inherits from the H5AC package) whose OID is addr is locked into memory for the duration of this function (see the H5AC_unprotect on line 90). Most likely, if this node has been accessed in the not-too-distant past, it will still be in memory and the H5AC_protect is almost a no-op. If cache debugging is compiled in, then the protect also prevents other parts of the library from accessing the node while this function is protecting it, so this function can allow the node to be in an inconsistent state while calling other parts of the library.

The alternative is to call the slightly cheaper H5AC_find and assume that the pointer it returns is valid only until some other library function is called, but since we're accessing the pointer throughout this function, I chose to use the simpler protect scheme. All protected objects must be unprotected before the file is closed, thus the use of HGOTO_ERROR instead of HRETURN_ERROR.

    -53    rt = bt->nchildren;
    -54 
    -55    while (lt<rt && cmp) {
    -56       idx = (lt + rt) / 2;
    -57       if (H5B_decode_keys (f, bt, idx)<0) {
    -58          HGOTO_ERROR (H5E_BTREE, H5E_CANTDECODE, FAIL);
    -59       }
    -60 
    -61       /* compare */
    -62       if ((cmp=(type->cmp3)(f, bt->key[idx].nkey, udata,
    -63                             bt->key[idx+1].nkey))<0) {
    -64          rt = idx;
    -65       } else {
    -66          lt = idx+1;
    -67       }
    -68    }
    -69    if (cmp) {
    -70       HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
    -71    }
    -    
    - -

    Code is arranged in paragraphs with a comment starting each - paragraph. The previous paragraph is a standard binary search - algorithm. The (type->cmp3)() is an indirect - function call into the subclass of the B-tree. All indirect - function calls have the function part in parentheses to document - that it's indirect (quite obvious here, but not so obvious when - the function is a variable). - -

    It's also my standard practice to have side effects in - conditional expressions because I can write code faster and it's - more apparent to me what the condition is testing. But if I - have an assignment in a conditional expr, then I use an extra - set of parens even if they're not required (usually they are, as - in this case) so it's clear that I meant = instead - of ==. - -

    -72 
    -73    /*
    -74     * Follow the link to the subtree or to the data node.
    -75     */
-76    assert (idx>=0 && idx<bt->nchildren);
    -77    if (bt->level > 0) {
    -78       if ((ret_value = H5B_find (f, type, bt->child+idx, udata))<0) {
    -79          HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
    -80       }
    -81    } else {
    -82       ret_value = (type->found)(f, bt->child+idx, bt->key[idx].nkey,
    -83                                 udata, bt->key[idx+1].nkey);
    -84       if (ret_value<0) {
    -85          HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
    -86       }
    -87    }
    -    
    - -

Here I broke the "side effect in conditional" rule, which I sometimes do if the expression is so long that the <0 gets lost at the end. Another thing to note is that success/failure is always determined by comparing with zero instead of SUCCEED or FAIL. I do this because occasionally one might want to return other meaningful values (always non-negative) or distinguish between various types of failure (always negative).

    -88 
    -89 done:
    -90    if (bt && H5AC_unprotect (f, H5AC_BT, addr, bt)<0) {
    -91       HRETURN_ERROR (H5E_BTREE, H5E_PROTECT, FAIL);
    -92    }
    -93    FUNC_LEAVE (ret_value);
    -94 }
    -    
    - -

    For lack of a better way to handle errors during error cleanup, - I just call the HRETURN_ERROR macro even though it - will make the error stack not quite right. I also use short - circuiting boolean operators instead of nested if - statements since that's standard C practice. - -

    Code Review 2

    - - -

    The following code is an API function from the H5F package... - -

    - 1 /*--------------------------------------------------------------------------
    - 2  NAME
    - 3     H5Fflush
    - 4 
    - 5  PURPOSE
    - 6     Flush all cached data to disk and optionally invalidates all cached
    - 7     data.
    - 8 
    - 9  USAGE
    -10     herr_t H5Fflush(fid, invalidate)
    -11         hid_t fid;              IN: File ID of file to close.
    -12         hbool_t invalidate;     IN: Invalidate all of the cache?
    -13 
    -14  ERRORS
    -15     ARGS      BADTYPE       Not a file atom. 
    -16     ATOM      BADATOM       Can't get file struct. 
    -17     CACHE     CANTFLUSH     Flush failed. 
    -18 
    -19  RETURNS
    -20     SUCCEED/FAIL
    -21 
    -22  DESCRIPTION
    -23         This function flushes all cached data to disk and, if INVALIDATE
    -24     is non-zero, removes cached objects from the cache so they must be
    -25     re-read from the file on the next access to the object.
    -26 
    -27  MODIFICATIONS:
    -28 --------------------------------------------------------------------------*/
    -    
    - -

    An API prologue is used for each API function instead of my - normal function prologue. I use the prologue from Code Review 1 - for non-API functions because it's more suited to C programmers, - it requires less work to keep it synchronized with the code, and - I have better editing tools for it. - -

    -29 herr_t
    -30 H5Fflush (hid_t fid, hbool_t invalidate)
    -31 {
    -32    H5F_t        *file = NULL;
    -33 
    -34    FUNC_ENTER (H5Fflush, H5F_init_interface, FAIL);
    -35    H5ECLEAR;
    -    
    - -

    API functions are never called internally, therefore I always - clear the error stack before doing anything. - -

    -36 
    -37    /* check arguments */
    -38    if (H5_FILE!=H5Aatom_group (fid)) {
    -39       HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL); /*not a file atom*/
    -40    }
    -41    if (NULL==(file=H5Aatom_object (fid))) {
    -42       HRETURN_ERROR (H5E_ATOM, H5E_BADATOM, FAIL); /*can't get file struct*/
    -43    }
    -    
    - -

    If something is wrong with the arguments then we raise an - error. We never assert arguments at this level. - We also convert atoms to pointers since atoms are really just a - pointer-hiding mechanism. Functions that can be called - internally always have pointer arguments instead of atoms - because (1) then they don't have to always convert atoms to - pointers, and (2) the various pointer data types provide more - documentation and type checking than just an hid_t - type. - -

    -44 
    -45    /* do work */
    -46    if (H5F_flush (file, invalidate)<0) {
    -47       HRETURN_ERROR (H5E_CACHE, H5E_CANTFLUSH, FAIL); /*flush failed*/
    -48    }
    -    
    - -

An internal version of the function does the real work. That internal version calls assert to check/document its arguments and can be called from other library functions.

    -49 
    -50    FUNC_LEAVE (SUCCEED);
    -51 }
    -    
    - -
    -
    Robb Matzke
    - - -Last modified: Mon Nov 10 15:33:33 EST 1997 - - - diff --git a/doc/html/TechNotes/Daily_Test_Explained.htm b/doc/html/TechNotes/Daily_Test_Explained.htm deleted file mode 100644 index ffa4798..0000000 --- a/doc/html/TechNotes/Daily_Test_Explained.htm +++ /dev/null @@ -1,863 +0,0 @@ - - - - - - - - -Daily Test Explained - - - - - - -
    - -

    Daily Test Explained

    - -

    Requirements for a Daily Test Host

    - -
      -
• Kerberos and AFS support
• Remote command execution (rsh or ssh) with Kerberos authentication support
• make that supports srcdir compiling (highly desirable)
• diff that supports the –I option (highly desirable for the launching host)
• cvs command support (desirable)
    - -

    Directories/Files Used

    - -

    $HOME/snapshots-XXX is where daily tests occur.

    - -
      -
• $HOME/snapshots-hdf5 for the hdf5 main trunk version (currently v1.5).
• $HOME/snapshots-hdf5_1_4 for hdf5 version 1.4.
• $HOME/snapshots-hdf4 for the hdf4 main trunk version (currently post 4.1r5).
    - -

    Inside snapshots-XXX Directory

    - -

·       current/            latest version
·       previous/           last released version
·       log/                log files of most recent tests
·       log/OLD/            previous log files
·       TestDir/<host>/     build and test area of machine <host> supporting srcdir build
·       allhostfile         holds all test host names
·       snaptest.cfg        holds various test configurations
·       release_always      always make snapshot release tarball if all tests pass (implemented for hdf4 daily tests only)
·       release_asap        make one snapshot release tarball if all tests pass (file is renamed after release)
·       release_not         do not make snapshot release tarball even if all tests pass

    - -

    Steps

    - -

This shows the steps of the daily tests for the HDF5 development version (currently v1.5). The HDF5 v1.4 and HDF4 daily tests are similar. snapshots-XXX here means $HOME/snapshots-hdf5/.

    - -

     

    - -
      -
1. “hdfadmin” starts a cron job after midnight on eirene.
2. The cron job acquires Kerberos credentials and AFS tokens.
3. Execute $HOME/.crondir/DailyMaint to start daily maintenance.
    - -

    ·       CVS -updates some documents on websites

    - -

    ·       CVS -updates $HOME/HDF5/v_1_5/hdf5/   (the -bin/runtest in it is ready to be used in  -next step)

    - -
      -
4. Execute $HOME/.bin-sys/DailyHDF5Test
    - -

    ·       Clean -up snapshots-XXX/log area

    - -

    a.      -Purge older files from OLD/

    - -

    b.     -Moves log files from yesterday to OLD/

    - -

    ·       cd -$HOME/HDF5/v_1_5/hdf5

    - -

    ·       Launch -“bin/runtest –all” from eirene

    - -
      -
5. bin/runtest –all
    - -

·       CVS updates $HOME/snapshots-XXX/current (the commands in bin/ are now ready to be used in the following steps).

    - -

    ·       Executes -snapshots-XXX/current/bin/chkmanifest for MANIFEST file.

    - -

·       Diff current/ and previous/ versions. If no significant differences are found, there is no need to run the daily test on each host, and no snapshot release tarball will be made.

    - -

    ·       If -significant differences found, prepare to run the daily tests for all hosts.

    - -

    ·       Reads -allhostfile for test hosts.  For each -host:

    - -

a.      Use ping and then rsh/ssh to make sure the host is online and responding.
b.      If srcdir is supported, fork off the following command for all hosts and wait for them to finish. Otherwise, launch one host at a time.
c.      rsh host “cd $HOME/snapshots-XXX/hdf5; bin/runtest” >& #<host>

    - -
      -
6. bin/runtest (one each on multiple hosts)
    - -

    ·       Since -“-all” is not used, it is for launching the test for this host only.

    - -

    ·       Reads -snapshots-XXX/snaptest.cfg and looks for configuration entries that are for -this host.

    - -

    ·       For -each configuration, runs snapshots-XXX/bin/snapshot with the configuration.

    - -

    ·       Configure, -build and test results are stored in log/<host>_YYMMDD_HHMM (e.g., -arabica_021024_0019)

    - -
      -
7. Back to “bin/runtest –all” in eirene
    - -

·       Gather all those #<host> files and other summary reports into one daily report (e.g., DailyHDF5Tests-eirene_021024).

    - -

·       Checks the tail of log/<host>_YYMMDD_HHMM to make sure it completed properly.

    - -
      -
8. Back to “.bin-sys/DailyHDF5Test”
    - -

·       Do a snapshot release if
            test-succeeded &&
            release-not-is-not-present &&
            ( today-is-saturday || release-asap-is-requested )

·       HDF4 does not know how to create a release tarball.  Its release process only renames current/ as previous/ to reduce future test time.  It also supports a release-always option, which tells the daily test to make a release whenever all tests pass.  The release-asap option only makes the release once and the file is then renamed, blocking any future ASAP release until someone turns it on again.

    - -
      -
    1. Compose a report and email it to “hdf5-cvs”
    - -

Acknowledgement

    Robb Matzke first set up the snapshot directory structure and created fairly complete versions of the snaptest, release, and h5ver commands.  The initial version was for testing on one host with the default configuration.  I just added more bells and whistles.  Jim Barlow showed me how to authenticate a cron task with a keytab.

    - -

    ----

    - -

    First created by Albert Cheng, October 24, 2002.

    - -

    Revised October 28, 2002.

    - -
    - - - - diff --git a/doc/html/TechNotes/DataTransformReport.htm b/doc/html/TechNotes/DataTransformReport.htm deleted file mode 100644 index 5a1a158..0000000 --- a/doc/html/TechNotes/DataTransformReport.htm +++ /dev/null @@ -1,877 +0,0 @@ - - - - - - - - -Arithmetic Data Transforms - - - - - - - - -
    - -

    Arithmetic Data Transforms

    - -

    Leon Arber, Albert Cheng, William Wendling[1]

    - -

    December 10, 2004

    - -

    Purpose

    Data can be stored and represented in many different ways.  In most fields of science, for example, the metric system is used for storing all data.  However, many fields of engineering still use the English system.  In such scenarios, there needs to be a way to easily perform arbitrary scaling of data.  The data transforms provide just such functionality.  They allow arbitrary arithmetic expressions to be applied to a dataset during read and write operations.  This means that data can be stored in Celsius in a data file, but read in and automatically converted to Fahrenheit.  Alternatively, data that is obtained in Fahrenheit can be written out to the data file in Celsius.

    Although a user can always manually modify the data they read and write, having the data transform as a property means that the user doesn’t have to worry about forgetting to call the conversion function or even writing it in the first place.

    - -

     

    - -

    Usage

    The data transform functionality is implemented as a property that is set on a dataset transfer property list.  There are two functions available: one for setting the transform and another for finding out what transform, if any, is currently set.

    The function for setting the transform is:

        herr_t H5Pset_data_transform(hid_t plist_id, const char* expression)

    plist_id is the identifier of the dataset transfer property list on which the data transform property should be set.

    expression is a pointer to a string of the form “(5/9.0)*(x-32)” which describes the transform.

    - -

     

    - -

    The function for getting the transform is:

        ssize_t H5Pget_data_transform(hid_t plist_id, char* expression, size_t size)

    plist_id is the identifier of the dataset transfer property list which will be queried for its data transform property.

    expression is either NULL or a pointer to memory where the data transform string, if present, will be copied.

    size is the number of bytes to copy from the transform string into expression.  H5Pget_data_transform will never copy more than the length of the transform expression.

    - -

     

    - -

    Data Transform Expressions

    Data transforms are set by passing a pointer to a string, which is the data transform expression.  This string describes what sort of arithmetic transform should be done during data transfer on read or write.  The string is a standard mathematical expression, as would be entered into something like MATLAB.

    - -

    Expressions are defined by the following context-free grammar:

        expr   := term | term + term | term - term
        term   := factor | factor * factor | factor / factor
        factor := number | symbol | - factor | + factor | ( expr )
        symbol := [a-zA-Z][a-zA-Z0-9]*
        number := INT | FLOAT

    where INT is interpreted as a C long int and FLOAT is interpreted as a C double.
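
    To make the grammar concrete, here is a small, self-contained sketch of a recursive-descent evaluator that follows the expr/term/factor productions above.  It is only an illustration: it handles numeric constants but omits symbols, it loops to allow chains of operators, and it is not the parser used inside the HDF5 library; every name in it is invented for this note.

        #include <ctype.h>
        #include <stdio.h>
        #include <stdlib.h>

        static const char *p;                 /* current position in the input */
        static double parse_expr(void);

        static void skip_ws(void) { while (isspace((unsigned char)*p)) p++; }

        /* factor := number | - factor | + factor | ( expr )   (symbols omitted) */
        static double parse_factor(void)
        {
            skip_ws();
            if (*p == '-') { p++; return -parse_factor(); }
            if (*p == '+') { p++; return  parse_factor(); }
            if (*p == '(') {
                p++;                          /* consume '(' */
                double v = parse_expr();
                skip_ws();
                if (*p == ')') p++;           /* consume ')' */
                return v;
            }
            {
                char *end;                    /* number := INT | FLOAT */
                double v = strtod(p, &end);
                p = end;
                return v;
            }
        }

        /* term := factor | factor * factor | factor / factor */
        static double parse_term(void)
        {
            double v = parse_factor();
            for (;;) {
                skip_ws();
                if (*p == '*')      { p++; v *= parse_factor(); }
                else if (*p == '/') { p++; v /= parse_factor(); }
                else return v;
            }
        }

        /* expr := term | term + term | term - term */
        static double parse_expr(void)
        {
            double v = parse_term();
            for (;;) {
                skip_ws();
                if (*p == '+')      { p++; v += parse_term(); }
                else if (*p == '-') { p++; v -= parse_term(); }
                else return v;
            }
        }

        int main(void)
        {
            p = "(5/9.0)*(68-32)";            /* 68 degrees F expressed in C */
            printf("%g\n", parse_expr());     /* prints 20 */
            return 0;
        }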

    - -

     

    - -

    This grammar allows for order of operations (multiplication and division take precedence over addition and subtraction), floating-point and integer constants, and grouping of terms by way of parentheses.  Although the grammar allows symbols to be arbitrary strings, this documentation will always use ‘x’ for symbols.

    Within a transform expression, the symbol represents a variable which contains the data to be manipulated.  For this reason, the terms symbol and variable will be used interchangeably.  Furthermore, in the current implementation of data transforms, all symbols appearing in an expression are interpreted as referring to the same dataset.  So, an expression such as “alpha + 5” is equivalent to “x+5”, and an expression such as “alpha + 3*beta + 5” is equivalent to “alpha + 3*alpha + 5”, which is equivalent to “4*x + 5”.

    - -

     

    - -

    Data Transform Implementation

    When the data transform property of a dataset transfer property list is set, a parse tree of the expression is immediately generated and its root is saved in the property list.  The generation of the parse tree involves several steps.

    First, the expression is reduced, so as to simplify the final parse and speed up the transform operations.  Expressions such as “(5/9.0)*(x-32)” will be reduced to “.555555*(x-32)”.  While further simplification is algebraically possible, the data transform code will only reduce simple, trivial arithmetic operations.

    Then, this reduced expression is parsed into a set of tokens, from which the parse tree is generated.  From the expression “(5/9.0)*(x-32)”, for example, the following parse tree would be created:


                              *
                            /   \
                     .555555     -
                                / \
                               x   32


    - -

    H5Dread with Data Transform Expressions

    When a read is performed with a dataset transfer property list that has the data transform property set, the following sequence of events occurs:

    1. A piece of the file is read into memory.
    2. The data transform is performed on this piece of memory.
    3. This piece of memory is then copied to the user.
    4. Steps 1-3 are repeated until the read is complete.
    - -

     

    - -

    Step 2 works like this:

    1. The function responsible for doing the transform is passed a buffer and is informed what type of data is inside this buffer and how many elements there are.
    2. This buffer is then treated as the variable in the data transform expression and the transform expression is applied.
    3. The transformed buffer is returned to the library.
    - -

     

    - -

    If the transform expression is “(5/9.0)*(x-32)”, with the parse tree shown above, and the buffer contains [-10 0 10 50 100], then the intermediate steps involved in the transform are:

    1. First, the (x-32) subexpression is evaluated.  The buffer now contains [-42 -32 -22 18 68].
    2. Then, the .555555 * part of the expression is evaluated.  The buffer now contains [-23.3333 -17.7777 -12.2222 9.9999 37.7777].
    3. The transform is now complete and the resulting buffer is returned.
    - -

     

    - -

    Note that the original data in the file was not modified.
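
    As a rough illustration of how step 2 can be carried out, the sketch below evaluates a hand-built parse tree against every element of a buffer, in place.  The node layout, the operator codes, and the apply_transform() helper are hypothetical names introduced only for this illustration; they are not the library's internal data structures.

        #include <stdio.h>
        #include <stdlib.h>

        /* Hypothetical parse-tree node: a constant, the variable 'x',
         * or an operator with two children. */
        typedef enum { NODE_CONST, NODE_VAR, NODE_ADD, NODE_SUB,
                       NODE_MUL,   NODE_DIV } node_kind_t;

        typedef struct node_t {
            node_kind_t    kind;
            double         value;          /* used when kind == NODE_CONST  */
            struct node_t *left, *right;   /* used for the operator kinds   */
        } node_t;

        /* Evaluate the tree for one data point x. */
        static double eval(const node_t *n, double x)
        {
            switch (n->kind) {
                case NODE_CONST: return n->value;
                case NODE_VAR:   return x;
                case NODE_ADD:   return eval(n->left, x) + eval(n->right, x);
                case NODE_SUB:   return eval(n->left, x) - eval(n->right, x);
                case NODE_MUL:   return eval(n->left, x) * eval(n->right, x);
                case NODE_DIV:   return eval(n->left, x) / eval(n->right, x);
            }
            return 0.0;
        }

        /* Apply the transform in place to a buffer of doubles. */
        static void apply_transform(const node_t *root, double *buf, size_t n)
        {
            for (size_t i = 0; i < n; i++)
                buf[i] = eval(root, buf[i]);
        }

        int main(void)
        {
            /* Build the tree for ".555555 * (x - 32)" by hand. */
            node_t xvar = { NODE_VAR,   0.0,      NULL,  NULL };
            node_t c32  = { NODE_CONST, 32.0,     NULL,  NULL };
            node_t cmul = { NODE_CONST, 0.555555, NULL,  NULL };
            node_t sub  = { NODE_SUB,   0.0,      &xvar, &c32 };
            node_t root = { NODE_MUL,   0.0,      &cmul, &sub };

            double buf[] = { -10, 0, 10, 50, 100 };   /* same data as above */
            apply_transform(&root, buf, 5);
            for (int i = 0; i < 5; i++)
                printf("%g ", buf[i]);
            printf("\n");
            return 0;
        }

    Running this on the buffer [-10 0 10 50 100] produces the same final values shown in the intermediate steps above.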

    - -

     

    - -

    H5Dwrite with Data Transform Expressions

    The process of a write works much the same way, but in the reverse order.  When a file is written out with a dataset transfer property list that has the data transform property set:

    1. The user passes a buffer to H5Dwrite, along with the type and number of elements.
    2. The data transform is performed on a copy of this piece of memory.
    3. This copy with the transformed data is then written out to the file.

    Step 2 works exactly as in the read example.  Note that the user’s data is not modified.  Also, since the transform property is not saved with the dataset, a user must know the inverse of the transform that was applied in order to recover the original data.  In the case of “(5/9.0)*(x-32)” this inverse would be “(9/5.0)*x + 32”.  Reading a data file that had previously been written out with a transform string of “(5/9.0)*(x-32)”, using a transform string of “(9/5.0)*x + 32”, would effectively recover the original data the author of the file had been using.[2]

    - -

     

    - -

    Mixed Mode and Truncation

    Because the data transform sits between the file space and the memory space and modifies data in between, various effects can occur that are the result of the typecasting that may be involved in the operations.  In addition, because constants in the data transform expression can be either INT or FLOAT, the data transform itself can be a source of truncation.

    In the example above, the reason that the transform expression is always written as “(5/9.0)*(x-32)” is because, if it were written without a floating-point constant, it would always evaluate to 0.  The expression “(5/9)*(x-32)” would, when set, get reduced to “0*(x-32)” because both 5 and 9 would be read as C long ints and, when divided, the result would be truncated to 0.  This resulting expression, “0*(x-32)”, would cause any data read or written to be saved as an array of all 0’s.
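
    This is ordinary C integer-division behaviour and is easy to demonstrate outside the library with a trivial stand-alone program:

        #include <stdio.h>

        int main(void)
        {
            long   i = 5 / 9;       /* both operands are integers: truncates to 0   */
            double d = 5 / 9.0;     /* 9.0 forces floating-point division: ~0.5556  */

            printf("5/9   = %ld\n", i);   /* prints 0        */
            printf("5/9.0 = %g\n",  d);   /* prints 0.555556 */
            return 0;
        }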

    - -

     

    - -

    Another source of unpredictability caused by truncation occurs when intermediate data is of a type that is more precise than the destination memory type.  For example, if the transform expression “(1/2.0)*x” is applied to data read from a file into an integer memory buffer, the results can be unpredictable.  If the source array is [1 2 3 4], then the resulting array could be either [0 1 1 2] or [0 0 1 1], depending on the floating-point unit of the processor.  Note that this result is independent of the source data type.  It doesn’t matter whether the source data is integer or floating point, because the 2.0 in the data transform expression will cause everything to be evaluated in a floating-point context.

    When setting transform expressions, care must be taken to ensure that the truncation does not adversely affect the data.  A workaround for the possible effects of a transform such as “(1/2.0)*x” would be to use the transform expression “(1/2.0)*x + 0.5” instead of the original.  This will ensure that all truncation rounds up, with the possible exception of a boundary condition.

    - -

     

    - -

    Data Transform Example

    The following code snippet shows an example using a data transform, where the data transform property is set and a write is performed.  Then, a read is performed with no data transform property set.  It is assumed that dataset is a dataset that has been opened, and that windchillF and windchillC are both arrays that hold floating-point data.  The result of this snippet is to fill windchillC with the data in windchillF, converted to Celsius.

    - -

     

    - -

    hid_t  dxpl_id_f_to_c;
    herr_t status;
    const char* f_to_c = “(5/9.0)*(x-32)”;

    /* Create the dataset transfer property list */
    dxpl_id_f_to_c = H5Pcreate(H5P_DATASET_XFER);

    /* Set the data transform to be used on the write */
    H5Pset_data_transform(dxpl_id_f_to_c, f_to_c);

    /*
     * Write the data to the dataset using the f_to_c transform
     */
    status = H5Dwrite(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id_f_to_c, windchillF);

    /* Read the data back with no data transform set */
    H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, windchillC);

    - -

     

    - -

    H5Pget_data_transform Details

    Querying the data transform string of a dataset transfer property list requires the use of the H5Pget_data_transform function.  This function provides the ability both to query the size of the stored string and to retrieve part or all of it.  Note that H5Pget_data_transform will return the expression that was set by H5Pset_data_transform.  The reduced transform string, computed when H5Pset_data_transform is called, is not stored in string form and is not available to the user.

    In order to ascertain the size of the string, a NULL expression should be passed to the function.  This will make the function return the length of the transform string (not including the terminating ‘\0’ character).

    To actually retrieve the string, a pointer to a valid memory location should be passed in for expression, and the number of bytes from the string that should be copied to that memory location should be passed in as size.
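
    Putting the two steps together, a typical query might look like the following sketch.  Error checking is omitted, and dxpl_id is assumed to be a dataset transfer property list that already has a transform set; the snippet simply follows the behaviour described above.

        ssize_t size;
        char   *expr;

        /* First call: NULL expression, so only the length of the transform
           string is returned (not counting the terminating '\0'). */
        size = H5Pget_data_transform(dxpl_id, NULL, 0);

        /* Second call: allocate room for the string plus a '\0' and copy it. */
        expr = (char *)malloc((size_t)size + 1);
        H5Pget_data_transform(dxpl_id, expr, (size_t)size);
        expr[size] = '\0';

        printf("transform: %s\n", expr);
        free(expr);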

    - -

     

    - -

    Further Work

    Some additional functionality can still be added to the data transform.  Currently the most important missing feature is the addition of more operators, such as exponentiation and the trigonometric functions.  Although exponentiation can be carried out explicitly with a transform expression such as “x*x*x”, it may be easier to support expressions like “x^3”.  Also lacking are the commonly used trigonometric functions, such as sin, cos, and tan.

    Popular constants could also be added, such as π or e.

    More advanced functionality, such as the ability to perform a transform on multiple datasets, is also a possibility, but such a feature is more a completely new addition than an extension to data transforms.

    - -
    - -

    - -
    - - - -
    - -

    [1] Mr. Wendling, who was involved in the initial design and implemented the expression parser, has left NCSA.

    - -
    - -
    - -

    [2] See the h5_dtransform.c example in the examples directory of the hdf5 library for just such an illustration.

    - -
    - -
    - - - - diff --git a/doc/html/TechNotes/ExternalFiles.html b/doc/html/TechNotes/ExternalFiles.html deleted file mode 100644 index c3197af..0000000 --- a/doc/html/TechNotes/ExternalFiles.html +++ /dev/null @@ -1,279 +0,0 @@ - - - - External Files in HDF5 - - - -

    External Files in HDF5

    - -

    Overview of Layers

    - -

    This table shows some of the layers of HDF5. Each layer calls - functions at the same or lower layers and never functions at - higher layers. An object identifier (OID) takes various forms - at the various layers: at layer 0 an OID is an absolute physical - file address; at layers 1 and 2 it's an absolute virtual file - address. At layers 3 through 6 it's a relative address, and at - layers 7 and above it's an object handle. - -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Layer-7GroupsDatasets
    Layer-6Indirect StorageSymbol Tables
    Layer-5B-treesObject HdrsHeaps
    Layer-4Caching
    Layer-3H5F chunk I/O
    Layer-2H5F low
    Layer-1File FamilySplit Meta/Raw
    Layer-0Section-2 I/OStandard I/OMalloc/Free
    -
    - -

    Single Address Space

    - -

    The simplest form of hdf5 file is a single file containing only - hdf5 data. The file begins with the boot block, which is - followed until the end of the file by hdf5 data. The next most - complicated file allows non-hdf5 data (user defined data or - internal wrappers) to appear before the boot block and after the - end of the hdf5 data. The hdf5 data is treated as a single - linear address space in both cases. - -

    The next level of complexity comes when non-hdf5 data is - interspersed with the hdf5 data. We handle that by including - the non-hdf5 interspersed data in the hdf5 address space and - simply not referencing it (eventually we might add those - addresses to a "do-not-disturb" list using the same mechanism as - the hdf5 free list, but it's not absolutely necessary). This is - implemented except for the "do-not-disturb" list. - -

    The most complicated single address space hdf5 file is when we - allow the address space to be split among multiple physical - files. For instance, a >2GB file can be split into smaller - chunks and transferred to a 32-bit machine, then accessed as a - single logical hdf5 file. The library already supports >32 bit - addresses, so at layer 1 we split a 64-bit address into a 32-bit - file number and a 32-bit offset (the 64 and 32 are - arbitrary). The rest of the library still operates with a linear - address space. - -
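    As a rough sketch of the kind of split described here (the widths and the
    macro names below are made up for illustration and are not the library's
    actual encoding), a 64-bit logical address can be broken into a member
    number and an offset like this:

        #include <stdint.h>
        #include <stdio.h>

        /* Hypothetical split of a 64-bit family address: the high 32 bits
           select the member file, the low 32 bits are the offset within it. */
        #define FAMILY_MEMBER(addr)  ((uint32_t)((addr) >> 32))
        #define FAMILY_OFFSET(addr)  ((uint32_t)((addr) & 0xFFFFFFFFu))

        int main(void)
        {
            uint64_t addr = ((uint64_t)3 << 32) | 0x1000u;  /* member 3, offset 4096 */

            printf("member %u, offset %u\n",
                   FAMILY_MEMBER(addr), FAMILY_OFFSET(addr));
            return 0;
        }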

    Another variation might be a family of two files where all the - meta data is stored in one file and all the raw data is stored - in another file to allow the HDF5 wrapper to be easily replaced - with some other wrapper. - -

    The H5Fcreate and H5Fopen functions - would need to be modified to pass file-type info down to layer 2 - so the correct drivers can be called and parameters passed to - the drivers to initialize them. - -

    Implementation

    - -

    I've implemented fixed-size family members. The entire hdf5 - file is partitioned into members where each member is the same - size. The family scheme is used if one passes a name to - H5F_open (which is called by H5Fopen() - and H5Fcreate) that contains a - printf(3c)-style integer format specifier. - Currently, the default low-level file driver is used for all - family members (H5F_LOW_DFLT, usually set to be Section 2 I/O or - Section 3 stdio), but we'll probably eventually want to pass - that as a parameter of the file access property list, which - hasn't been implemented yet. When creating a family, a default - family member size is used (defined at the top H5Ffamily.c, - currently 64MB) but that also should be settable in the file - access property list. When opening an existing family, the size - of the first member is used to determine the member size - (flushing/closing a family ensures that the first member is the - correct size) but the other family members don't have to be that - large (the local address space, however, is logically the same - size for all members). - -

    I haven't implemented a split meta/raw family yet but am rather - curious to see how it would perform. I was planning to use the - `.h5' extension for the meta data file and `.raw' for the raw - data file. The high-order bit in the address would determine - whether the address refers to meta data or raw data. If the user - passes a name that ends with `.raw' to H5F_open - then we'll choose the split family and use the default low level - driver for each of the two family members. Eventually we'll - want to pass these kinds of things through the file access - property list instead of relying on naming convention. - -

    External Raw Data

    - -

    We also need the ability to point to raw data that isn't in the - HDF5 linear address space. For instance, a dataset might be - striped across several raw data files. - -

    Fortunately, the only two packages that need to be aware of - this are the packages for reading/writing contiguous raw data - and discontiguous raw data. Since contiguous raw data is a - special case, I'll discuss how to implement external raw data in - the discontiguous case. - -

    Discontiguous data is stored as a B-tree whose keys are the - chunk indices and whose leaf nodes point to the raw data by - storing a file address. So what we need is some way to name the - external files, and a way to efficiently store the external file - name for each chunk. - -

    I propose adding to the object header an External File - List message that is a 1-origin array of file names. - Then, in the B-tree, each key has an index into the External - File List (or zero for the HDF5 file) for the file where the - chunk can be found. The external file index is only used at - the leaf nodes to get to the raw data (the entire B-tree is in - the HDF5 file) but because of the way keys are copied among - the B-tree nodes, it's much easier to store the index with - every key. - -
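    A minimal sketch of that idea is shown below; the type and field names are
    invented for illustration and are not the actual on-disk layout.  Each
    chunk key carries an index into the dataset's external file list, with
    zero meaning the chunk lives in the HDF5 file itself.

        #include <stdint.h>

        /* Hypothetical external file list attached to a dataset's object
           header: names[0] is unused so that indices are 1-origin, matching
           the message described above. */
        typedef struct {
            unsigned   nfiles;           /* number of external files             */
            char     **names;            /* names[1] .. names[nfiles]            */
        } ext_file_list_t;

        /* Hypothetical B-tree key for one chunk of a chunked dataset. */
        typedef struct {
            uint64_t   chunk_index[8];   /* chunk indices, one per dimension     */
            unsigned   file_index;       /* 0 = HDF5 file, else external index   */
            uint64_t   addr;             /* address of the raw data in that file */
        } chunk_key_t;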

    Multiple HDF5 Files

    - -

    One might also want to combine two or more HDF5 files in a - manner similar to mounting file systems in Unix. That is, the - group structure and meta data from one file appear as though - they exist in the first file. One opens File-A, and then - mounts File-B at some point in File-A, the mount - point, so that traversing into the mount point actually - causes one to enter the root object of File-B. File-A and - File-B are each complete HDF5 files and can be accessed - individually without mounting them. - -

    We need a couple additional pieces of machinery to make this - work. First, an haddr_t type (a file address) doesn't contain - any info about which HDF5 file's address space the address - belongs to. But since haddr_t is an opaque type except at - layers 2 and below, it should be quite easy to add a pointer to - the HDF5 file. This would also remove the H5F_t argument from - most of the low-level functions since it would be part of the - OID. - -

    The other thing we need is a table of mount points and some - functions that understand them. We would add the following - table to each H5F_t struct: - -

    -struct H5F_mount_t {
    -   H5F_t *parent;         /* Parent HDF5 file if any */
    -   struct {
    -      H5F_t *f;           /* File which is mounted */
    -      haddr_t where;      /* Address of mount point */
    -   } *mount;              /* Array sorted by mount point */
    -   intn nmounts;          /* Number of mounted files */
    -   intn alloc;            /* Size of mount table */
    -}
    -    
    - -

    The H5Fmount function takes the ID of an open - file or group, the name of a to-be-mounted file, the name of the mount - point, and a file access property list (like H5Fopen). - It opens the new file and adds a record to the parent's mount - table. The H5Funmount function takes the parent - file or group ID and the name of the mount point and disassociates - the mounted file from the mount point. It does not close the - mounted file. The H5Fclose - function closes/unmounts files recursively. - -

    The H5G_iname function which translates a name to - a file address (haddr_t) looks at the mount table - at each step in the translation and switches files where - appropriate. All name-to-address translations occur through - this function. - -

    How Long?

    - -

    I'm expecting to be able to implement the two new flavors of - single linear address space in about two days. It took two hours - to implement the malloc/free file driver at level zero and I - don't expect this to be much more work. - -

    I'm expecting three days to implement the external raw data for - discontiguous arrays. Adding the file index to the B-tree is - quite trivial; adding the external file list message shouldn't - be too hard since the object header message class from which this - message derives is fully implemented; and changing - H5F_istore_read should be trivial. Most of the - time will be spent designing a way to cache Unix file - descriptors efficiently since the total number of open files - allowed per process could be much smaller than the total number - of HDF5 files and external raw data files. - -

    I'm expecting four days to implement being able to mount one - HDF5 file on another. I was originally planning a lot more, but - making haddr_t opaque turned out to be much easier - than I planned (I did it last Fri). Most of the work will - probably be removing the redundant H5F_t arguments for lots of - functions. - -

    Conclusion

    - -

    The external raw data could be implemented as a single linear - address space, but doing so would require one to allocate large - enough file addresses throughout the file (>32bits) before the - file was created. It would make mixing an HDF5 file family with - external raw data, or external HDF5 wrapper around an HDF4 file - a more difficult process. So I consider the implementation of - external raw data files as a single HDF5 linear address space a - kludge. - -

    The ability to mount one HDF5 file on another might not be a - very important feature especially since each HDF5 file must be a - complete file by itself. It's not possible to stripe an array - over multiple HDF5 files because the B-tree wouldn't be complete - in any one file, so the only choice is to stripe the array - across multiple raw data files and store the B-tree in the HDF5 - file. On the other hand, it might be useful if one file - contains some public data which can be mounted by other files - (e.g., a mesh topology shared among collaborators and mounted by - files that contain other fields defined on the mesh). Of course - the applications can open the two files separately, but it might - be more portable if we support it in the library. - -

    So we're looking at about two weeks to implement all three - versions. I didn't get a chance to do any of them in AIO - although we had long-term plans for the first two with a - possibility of the third. They'll be much easier to implement in - HDF5 than AIO since I've been keeping these in mind from the - start. - -


    -
    Robb Matzke
    - - -Last modified: Tue Sep 8 14:43:32 EDT 1998 - - - diff --git a/doc/html/TechNotes/FreeLists.html b/doc/html/TechNotes/FreeLists.html deleted file mode 100644 index 1a4b8e8..0000000 --- a/doc/html/TechNotes/FreeLists.html +++ /dev/null @@ -1,205 +0,0 @@ - - -Memory Management and Free Lists - - - - -

    Memory Management and Free Lists

    - -
    -
    -At Release 1.2.2, free list management code was introduced to the HDF5 
    -library.  This included one user-level function, H5garbage_collect, which 
    -garbage collects on all the free-lists.  H5garbage_collect is the only user-
    -accessible (i.e., application developer-accessible) element of this 
    -functionality.
    -
    -The free-lists generally reduce the amount of dynamic memory used to around 
    -75% of the pre-optimized amount as well as speed up the time spent in library 
    -code by ~5%.  The free-lists also help linearize the amount of memory used with 
    -increasing numbers of datasets or re-writes on the data, so the amount of 
    -memory used for the 1500/45 free-list case is only 66% of the memory used for 
    -the unoptimized case.
    -
    -Overall, the introduction of free list management is a win: the library is 
    -slightly faster and uses far fewer system resources than before.  Most of the
    -emphasis has been focused on the main "thoroughfares" through the code;
    -less attention was paid to the "back streets" which are used much less 
    -frequently and offer less potential for abuse.
    -
    -Adding a free-list for a data structure in the HDF5 library code is easy:
    -
    -Old code:
    ----------
    -    int foo(void)
    -    {
    -        H5W_t *w;
    -
    -        for(i=0; i<x; i++) {
    -            w=H5MM_malloc(sizeof(H5W_t));
    -            <use w>
    -            H5MM_xfree(w);
    -        }
    -    }
    -
    -New code:
    ----------
    -H5FL_DEFINE(H5W_t);
    -
    -    int foo(void)
    -    {
    -        H5W_t *w;
    -
    -        for(i=0; i<x; i++) {
    -            w=H5FL_ALLOC(H5W_t,0);
    -            <use w>
    -            H5FL_FREE(H5W_t,w);
    -        }
    -    }
    -
    -
    -There are three kinds of free-lists: 
    -   -- for "regular" objects, 
    -   -- arrays of fixed size object (both fixed length and unknown length), and 
    -   -- "blocks" of bytes.  
    - 
    -   "Regular" free-lists use the H5FL_<*> macros in H5FLprivate.h and are
    -   designed for single, fixed-size data structures like typedef'ed structs,
    -   etc.  
    -
    -   Arrays of objects use the H5FL_ARR_<*> macros and are designed for arrays 
    -   (both fixed in length and varying lengths) of fixed length data structures 
    -   (like typedef'ed types).  
    -
    -   "Block" free-lists use the H5FL_BLK_<*> macros and are designed to hold 
    -   varying sized buffers of bytes, with no structure.  
    -
    -   H5S.c contains examples for "regular" and fixed-sized arrays;
    -   H5B.c contains examples for variable-sized arrays and "blocks".
    -
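
    For readers unfamiliar with the technique, here is a generic, stand-alone
    sketch of the idea behind a free list (it deliberately does not use the
    H5FL macros): freed objects are pushed onto a singly-linked list and
    reused by later allocations instead of going back to malloc/free every
    time, and a garbage-collect call hands everything on the list back to
    the system.

        #include <stdlib.h>

        typedef struct item_t {
            struct item_t *next;     /* links freed items together          */
            double         payload;  /* the actual data carried by an item  */
        } item_t;

        static item_t *free_list = NULL;   /* head of the free list */

        static item_t *item_alloc(void)
        {
            if (free_list) {               /* reuse a previously freed item */
                item_t *it = free_list;
                free_list = it->next;
                return it;
            }
            return malloc(sizeof(item_t)); /* list empty: fall back to malloc */
        }

        static void item_free(item_t *it)
        {
            it->next = free_list;          /* push onto the free list */
            free_list = it;
        }

        /* Garbage collect: give everything on the free list back to the system. */
        static void item_garbage_collect(void)
        {
            while (free_list) {
                item_t *it = free_list;
                free_list = it->next;
                free(it);
            }
        }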
    -A free-list doesn't have to be used for every data structure allocated and
    -freed, just for those which are prone to abuse when multiple operations are
    -being performed.  It is important to use the macros for declaring and
    -manipulating the free-lists however; they allow the free'd objects on the 
    -lists to be garbage collected by the library at the library's termination 
    -or at the user's request.
    -
    -One public API function has been added: H5garbage_collect, which garbage 
    -collects on all the free-lists of all the different types.  It's not required 
    -to be called and is only necessary in situations when the application 
    -performs actions which cause the library to allocate many objects and then 
    -the application eventually releases those objects and wants to reduce the 
    -memory used by the library from the peak usage required.  The library 
    -automatically garbage collects all the free lists when the application ends.
    -
    -Questions should be sent to the HDF Help Desk at hdfhelp@ncsa.uiuc.edu.
    -
    -
    -===========================================
    -BENCHMARK INFORMATION
    -===========================================
    -
    -New version with free lists:
    -
    -Datasets=500, Data Rewrites=15:
    -    Peak Heap Usage: 18210820 bytes
    -    Time in library code: 2.260 seconds
    -    # of malloc calls: 22864
    -
    -Datasets=1000, Data Rewrites=15:
    -    Peak Heap Usage: 31932420 bytes
    -    Time in library code: 5.090 seconds
    -    # of malloc calls: 43045
    -
    -Datasets=1500, Data Rewrites=15:
    -    Peak Heap Usage: 41566212 bytes
    -    Time in library code: 8.623 seconds
    -    # of malloc calls: 60623
    -
    -Datasets=500, Data Rewrites=30:
    -    Peak Heap Usage: 19456004 bytes
    -    Time in library code: 4.274 seconds
    -    # of malloc calls: 23353
    -
    -Datasets=1000, Data Rewrites=30:
    -    Peak Heap Usage: 33988612 bytes
    -    Time in library code: 9.955 seconds
    -    # of malloc calls: 43855
    -
    -Datasets=1500, Data Rewrites=30:
    -    Peak Heap Usage: 43950084 bytes
    -    Time in library code: 17.413 seconds
    -    # of malloc calls: 61554
    -
    -Datasets=500, Data Rewrites=45:
    -    Peak Heap Usage: 20717572 bytes
    -    Time in library code: 6.326 seconds
    -    # of malloc calls: 23848
    -
    -Datasets=1000, Data Rewrites=45:
    -    Peak Heap Usage: 35807236 bytes
    -    Time in library code: 15.146 seconds
    -    # of malloc calls: 44572
    -
    -Datasets=1500, Data Rewrites=45:
    -    Peak Heap Usage: 46022660 bytes
    -    Time in library code: 27.140 seconds
    -    # of malloc calls: 62370
    -
    -
    -Older version with no free lists:
    -
    -Datasets=500, Data Rewrites=15:
    -    Peak Heap Usage: 25370628 bytes
    -    Time in library code: 2.329 seconds
    -    # of malloc calls: 194991
    -
    -Datasets=1000, Data Rewrites=15:
    -    Peak Heap Usage: 39550980 bytes
    -    Time in library code: 5.251 seconds
    -    # of malloc calls: 417971
    -
    -Datasets=1500, Data Rewrites=15:
    -    Peak Heap Usage: 68870148 bytes
    -    Time in library code: 8.913 seconds
    -    # of malloc calls: 676564
    -
    -Datasets=500, Data Rewrites=30:
    -    Peak Heap Usage: 31670276 bytes
    -    Time in library code: 4.435 seconds
    -    # of malloc calls: 370320
    -
    -Datasets=1000, Data Rewrites=30:
    -    Peak Heap Usage: 44646404 bytes
    -    Time in library code: 10.325 seconds
    -    # of malloc calls: 797125
    -
    -Datasets=1500, Data Rewrites=30:
    -    Peak Heap Usage: 68870148 bytes
    -    Time in library code: 18.057 seconds
    -    # of malloc calls: 1295336
    -
    -Datasets=500, Data Rewrites=45:
    -    Peak Heap Usage: 33906692 bytes
    -    Time in library code: 6.577 seconds
    -    # of malloc calls: 545656
    -
    -Datasets=1000, Data Rewrites=45:
    -    Peak Heap Usage: 56778756 bytes
    -    Time in library code: 15.720 seconds
    -    # of malloc calls: 1176285
    -
    -Datasets=1500, Data Rewrites=45:
    -    Peak Heap Usage: 68870148 bytes
    -    Time in library code: 28.138 seconds
    -    # of malloc calls: 1914097
    -
    -
    -===========================================
    -Last Modified:  3 May 2000
    -HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
    -
    -
    - - diff --git a/doc/html/TechNotes/H4-H5Compat.html b/doc/html/TechNotes/H4-H5Compat.html deleted file mode 100644 index 2992476..0000000 --- a/doc/html/TechNotes/H4-H5Compat.html +++ /dev/null @@ -1,271 +0,0 @@ - - - - Backward/Forward Compatability - - - -

    Backward/Forward Compatibility

    - -

    The HDF5 development must proceed in such a manner as to - satisfy the following conditions: - -

      -
    1. HDF5 applications can produce data that HDF5 - applications can read and write and HDF4 applications can produce - data that HDF4 applications can read and write. The situation - that demands this condition is obvious.

    2. HDF5 applications are able to produce data that HDF4 applications - can read and HDF4 applications can subsequently modify the - file subject to certain constraints depending on the - implementation. This condition is for the temporary - situation where a consumer has neither been relinked with a new - HDF4 API built on top of the HDF5 API nor recompiled with the - HDF5 API.

    3. HDF5 applications can read existing HDF4 files and subsequently - modify the file subject to certain constraints depending on - the implementation. This condition is for the temporary - situation in which the producer has neither been relinked with a - new HDF4 API built on top of the HDF5 API nor recompiled with - the HDF5 API, or the permanent situation of HDF5 consumers - reading archived HDF4 files.

      There's at least one invariant: new object features introduced - in the HDF5 file format (like 2-d arrays of structs) might be - impossible to "translate" to a format that an old HDF4 - application can understand either because the HDF4 file format - or the HDF4 API has no mechanism to describe the object. - -

      What follows is one possible implementation based on how - Condition B was solved in the AIO/PDB world. It also attempts - to satisfy these goals: - -

        -
      1. The main HDF5 library contains as little extra baggage as - possible by either relying on external programs to take care - of compatibility issues or by incorporating the logic of such - programs as optional modules in the HDF5 library. Conditions B - and C are separate programs/modules.

      2. No extra baggage not only means the library proper is small, - but also means it can be implemented (rather than migrated - from HDF4 source) from the ground up with minimal regard for - HDF4, thus keeping the logic straightforward.

      3. Compatibility issues are handled behind the scenes when - necessary (and possible) but can be carried out explicitly - during things like data migration.
      - -
      -

      Wrappers

      - -

      The proposed implementation uses wrappers to handle - compatibility issues. A Format-X file is wrapped in a - Format-Y file by creating a Format-Y skeleton that replicates - the Format-X meta data. The Format-Y skeleton points to the raw - data stored in Format-X without moving the raw data. The - restriction is that raw data storage methods in Format-Y are a - superset of raw data storage methods in Format-X (otherwise the - raw data must be copied to Format-Y). We're assuming that meta - data is small with respect to the entire file. - -

      The wrapper can be a separate file that has pointers into the - first file or it can be contained within the first file. If - contained in a single file, the file can appear as a Format-Y - file or simultaneously a Format-Y and Format-X file. - -

      The Format-X meta-data can be thought of as the original - wrapper around raw data and Format-Y is a second wrapper around - the same data. The wrappers are independent of one another; - modifying the meta-data in one wrapper causes the other to - become out of date. Modification of raw data doesn't invalidate - either view as long as the meta data that describes its storage - isn't modified. For instance, an array element can change values - if storage is already allocated for the element, but if storage - isn't allocated then the meta data describing the storage must - change, invalidating all wrappers but one. - -

      It's perfectly legal to modify the meta data of one wrapper - without modifying the meta data in the other wrapper(s). The - illegal part is accessing the raw data through a wrapper which - is out of date. - -

      If raw data is wrapped by more than one internal wrapper - (internal means that the wrapper is in the same file as - the raw data) then access to that file must assume that - unreferenced parts of that file contain meta data for another - wrapper and cannot be reclaimed as free memory. - -


      -

      Implementation of Condition B

      - -

      Since this is a temporary situation which can't be - automatically detected by the HDF5 library, we must rely - on the application to notify the HDF5 library whether or not it - must satisfy Condition B. (Even if we don't rely on the - application, at some point someone is going to remove the - Condition B constraint from the library.) So the module that - handles Condition B is conditionally compiled and then enabled - on a per-file basis. - -

      If the application desires to produce an HDF4 file (determined - by arguments to H5Fopen), and the Condition B - module is compiled into the library, then H5Fclose - calls the module to traverse the HDF5 wrapper and generate an - additional internal or external HDF4 wrapper (wrapper specifics - are described below). If Condition B is implemented as a module - then it can benefit from the metadata already cached by the main - library. - -

      An internal HDF4 wrapper would be used if the HDF5 file is - writable and the user doesn't mind that the HDF5 file is - modified. An external wrapper would be used if the file isn't - writable or if the user wants the data file to be primarily HDF5 - but a few applications need an HDF4 view of the data. - -

      Modifying through the HDF5 library an HDF5 file that has - internal HDF4 wrapper should invalidate the HDF4 wrapper (and - optionally regenerate it when H5Fclose is - called). The HDF5 library must understand how wrappers work, but - not necessarily anything about the HDF4 file format. - -

      Modifying through the HDF5 library an HDF5 file that has an - external HDF4 wrapper will cause the HDF4 wrapper to become out - of date (but possibly regenerated during H5Fclose). - Note: Perhaps the next release of the HDF4 library should - ensure that the HDF4 wrapper file has a more recent modification - time than the raw data file (the HDF5 file) to which it - points(?) - -

      Modifying through the HDF4 library an HDF5 file that has an - internal or external HDF4 wrapper will cause the HDF5 wrapper to - become out of date. However, there is no way for the old HDF4 - library to notify the HDF5 wrapper that it's out of date. - Therefore the HDF5 library must be able to detect when the HDF5 - wrapper is out of date and be able to fix it. If the HDF4 - wrapper is complete then the easy way is to ignore the original - HDF5 wrapper and generate a new one from the HDF4 wrapper. The - other approach is to compare the HDF4 and HDF5 wrappers and - assume that if they differ HDF4 is the right one, if HDF4 omits - data then it was because HDF4 is a partial wrapper (rather than - assume HDF4 deleted the data), and if HDF4 has new data then - copy the new meta data to the HDF5 wrapper. On the other hand, - perhaps we don't need to allow these situations (modifying an - HDF5 file with the old HDF4 library and then accessing it with - the HDF5 library is either disallowed or causes HDF5 objects - that can't be described by HDF4 to be lost). - -

      To convert an HDF5 file to an HDF4 file on demand, one simply - opens the file with the HDF4 flag and closes it. This is also - how AIO implemented backward compatibility with PDB in its file - format. - -


      -

      Implementation of Condition C

      - -

      This condition must be satisfied for all time because there - will always be archived HDF4 files. If a pure HDF4 file (that - is, one without HDF5 meta data) is opened with an HDF5 library, - the H5Fopen builds an internal or external HDF5 - wrapper and then accesses the raw data through that wrapper. If - the HDF5 library modifies the file then the HDF4 wrapper becomes - out of date. However, since the HDF5 library hasn't been - released, we can at least implement it to disable and/or reclaim - the HDF4 wrapper. - -

      If an external and temporary HDF5 wrapper is desired, the - wrapper is created through the cache like all other HDF5 files. - The data appears on disk only if a particular cached datum is - preempted. Instead of calling H5Fclose on the HDF5 - wrapper file we call H5Fabort which immediately - releases all file resources without updating the file, and then - we unlink the file from Unix. - -


      -

      What do wrappers look like?

      - -

      External wrappers are quite obvious: they contain only things - from the format specs for the wrapper and nothing from the - format specs of the format which they wrap. - -

      An internal HDF4 wrapper is added to an HDF5 file in such a way - that the file appears to be both an HDF4 file and an HDF5 - file. HDF4 requires an HDF4 file header at file offset zero. If - a user block is present then we just move the user block down a - bit (and truncate it) and insert the minimum HDF4 signature. - The HDF4 dd list and any other data it needs are - appended to the end of the file and the HDF5 signature uses the - logical file length field to determine the beginning of the - trailing part of the wrapper. - -

      -

      - - - - - - - - - - - - - -
      HDF4 minimal file header. Its main job is to point to - the dd list at the end of the file.
      User-defined block which is truncated by the size of the - HDF4 file header so that the HDF5 boot block file address - doesn't change.
      The HDF5 boot block and data, unmodified by adding the - HDF4 wrapper.
      The main part of the HDF4 wrapper. The dd - list will have entries for all parts of the file so - hdpack(?) doesn't (re)move anything.
      -
      - -

      When such a file is opened by the HDF5 library for - modification it shifts the user block back down to address zero - and fills with zeros, then truncates the file at the end of the - HDF5 data or adds the trailing HDF4 wrapper to the free - list. This prevents HDF4 applications from reading the file with - an out of date wrapper. - -

      If there is no user block then we have a problem. The HDF5 - boot block must be moved to make room for the HDF4 file header. - But moving just the boot block causes problems because all file - addresses stored in the file are relative to the boot block - address. The only option is to shift the entire file contents - by 512 bytes to open up a user block (too bad we don't have - hooks into the Unix i-node stuff so we could shift the entire - file contents by the size of a file system page without ever - performing I/O on the file :-) - -

      Is it possible to place an HDF5 wrapper in an HDF4 file? I - don't know enough about the HDF4 format, but I would suspect it - might be possible to open a hole at file address 512 (and - possibly before) by moving some things to the end of the file - to make room for the HDF5 signature. The remainder of the HDF5 - wrapper goes at the end of the file and entries are added to the - HDF4 dd list to mark the location(s) of the HDF5 - wrapper. - -


      -

      Other Thoughts

      - -

      Conversion programs that copy an entire HDF4 file to a separate, - self-contained HDF5 file and vice versa might be useful. - - - - -


      -
      Robb Matzke
      - - -Last modified: Wed Oct 8 12:34:42 EST 1997 - - - diff --git a/doc/html/TechNotes/HeapMgmt.html b/doc/html/TechNotes/HeapMgmt.html deleted file mode 100644 index e9e8db4..0000000 --- a/doc/html/TechNotes/HeapMgmt.html +++ /dev/null @@ -1,84 +0,0 @@ - - - -

      Heap Management in HDF5

      - -
      -
      -Heap functions are in the H5H package.
      -
      -
      -off_t
      -H5H_new (hdf5_file_t *f, size_t size_hint, size_t realloc_hint);
      -
      -	Creates a new heap in the specified file which can efficiently
      -	store at least SIZE_HINT bytes.  The heap can store more than
      -	that, but doing so may cause the heap to become less efficient
      -	(for instance, a heap implemented as a B-tree might become
      -	discontiguous).  The REALLOC_HINT is the minimum number of bytes
      -	by which the heap will grow when it must be resized. The hints
      -	may be zero in which case reasonable (but probably not
      -	optimal) values will be chosen.
      -
      -	The return value is the address of the new heap relative to
      -	the beginning of the file boot block.
      -
      -off_t
      -H5H_insert (hdf5_file_t *f, off_t addr, size_t size, const void *buf);
      -
      -	Copies SIZE bytes of data from BUF into the heap whose address
      -	is ADDR in file F.  BUF must be the _entire_ heap object.  The
      -	return value is the byte offset of the new data in the heap.
      -
      -void *
      -H5H_read (hdf5_file_t *f, off_t addr, off_t offset, size_t size, void *buf);
      -
      -	Copies SIZE bytes of data from the heap whose address is ADDR
      -	in file F into BUF and then returns the address of BUF.  If
      -	BUF is the null pointer then a new buffer will be malloc'd by
      -	this function and its address is returned.
      -
      -	Returns buffer address or null.
      -
      -const void *
      -H5H_peek (hdf5_file_t *f, off_t addr, off_t offset)
      -
      -	A more efficient version of H5H_read that returns a pointer
      -	directly into the cache; the data is not copied from the cache
      -	to a buffer.  The pointer is valid until the next call to an
      -	H5AC function directly or indirectly.
      -
      -	Returns a pointer or null.  Do not free the pointer.
      -
      -void *
      -H5H_write (hdf5_file_t *f, off_t addr, off_t offset, size_t size,
      -           const void *buf);
      -
      -	Modifies (part of) an object in the heap at address ADDR of
      -	file F by copying SIZE bytes from the beginning of BUF to the
      -	file.  OFFSET is the address within the heap where the output
      -	is to occur.
      -
      -	This function can fail if the combination of OFFSET and SIZE
      -	would write over a boundary between two heap objects.
      -
      -herr_t
      -H5H_remove (hdf5_file_t *f, off_t addr, off_t offset, size_t size);
      -
      -	Removes an object or part of an object which begins at byte
      -	OFFSET within a heap whose address is ADDR in file F.  SIZE
      -	bytes are returned to the free list.  Removing the middle of
      -	an object has the side effect that one object is now split
      -	into two objects.
      -
      -	Returns success or failure.
      -
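      -
      -	Taken together, a typical use of these functions might look like the
      -	sketch below.  It simply follows the signatures documented above;
      -	error checking is omitted and the variable names are illustrative only.
      -
      -	    extern hdf5_file_t *f;      /* an already-open file, obtained elsewhere */
      -	    off_t       heap_addr, obj_offset;
      -	    const char  name[] = "dataset one";
      -	    char        buf[sizeof(name)];
      -
      -	    /* Create a heap that can hold at least 1024 bytes, growing by 256. */
      -	    heap_addr = H5H_new(f, 1024, 256);
      -
      -	    /* Copy the whole object into the heap and remember its byte offset. */
      -	    obj_offset = H5H_insert(f, heap_addr, sizeof(name), name);
      -
      -	    /* Read the object back out into buf. */
      -	    H5H_read(f, heap_addr, obj_offset, sizeof(name), buf);
      -
      -	    /* Return the object's bytes to the heap's free list. */
      -	    H5H_remove(f, heap_addr, obj_offset, sizeof(name));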
      -===========================================
      -Last Modified:  8 July 1998 (technical content)
      -Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
      -HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
      -
      -
      - - - diff --git a/doc/html/TechNotes/IOPipe.html b/doc/html/TechNotes/IOPipe.html deleted file mode 100644 index 7c24e2c..0000000 --- a/doc/html/TechNotes/IOPipe.html +++ /dev/null @@ -1,114 +0,0 @@ - - - - The Raw Data I/O Pipeline - - - -

      The Raw Data I/O Pipeline

      - -

      The HDF5 raw data pipeline is a complicated beast that handles - all aspects of raw data storage and transfer of that data - between the file and the application. Data can be stored - contiguously (internal or external), in variable size external - segments, or regularly chunked; it can be sparse, extendible, - and/or compressible. Data transfers must be able to convert from - one data space to another, convert from one number type to - another, and perform partial I/O operations. Furthermore, - applications will expect their common usage of the pipeline to - perform well. - -

      To accomplish these goals, the pipeline has been designed in a - modular way so no single subroutine is overly complicated and so - functionality can be inserted easily at the appropriate - locations in the pipeline. A general pipeline was developed and - then certain paths through the pipeline were optimized for - performance. - -

      We describe only the file-to-memory side of the pipeline since - the memory-to-file side is a mirror image. We also assume that a - proper hyperslab of a simple data space is being read from the - file into a proper hyperslab of a simple data space in memory, - and that the data type is a compound type which may require - various number conversions on its members. - - Figure 1 - -

      The diagrams should be read from the top down. The Line A - in the figure above shows that H5Dread() copies - data from a hyperslab of a file dataset to a hyperslab of an - application buffer by calling H5D_read(). And - H5D_read() calls, in a loop, - H5S_simp_fgath(), H5T_conv_struct(), - and H5S_simp_mscat(). A temporary buffer, TCONV, is - loaded with data points from the file, then data type conversion - is performed on the temporary buffer, and finally data points - are scattered out to application memory. Thus, data type - conversion is an in-place operation and data space conversion - consists of two steps. An additional temporary buffer, BKG, is - large enough to hold N instances of the destination - data type where N is the same number of data points - that can be held by the TCONV buffer (which is large enough to - hold either source or destination data points). - -
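
      In outline, that loop behaves roughly like the pseudocode below.  It is
      deliberately simplified: the real functions take many more arguments, and
      only the function and buffer names mentioned in this note are used.

          /* Simplified pseudocode for the file-to-memory side of one H5Dread() call. */
          while (points_remaining > 0) {
              n = MIN(points_remaining, points_that_fit_in_tconv);

              /* Gather the next n data points from the file hyperslab into TCONV. */
              H5S_simp_fgath(file, file_space, n, tconv_buf);

              /* Convert the data type in place in TCONV, using BKG for any
                 partially initialized destination members. */
              H5T_conv_struct(src_type, dst_type, n, tconv_buf, bkg_buf);

              /* Scatter the converted points out to the application's memory hyperslab. */
              H5S_simp_mscat(tconv_buf, n, mem_space, app_buf);

              points_remaining -= n;
          }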

      The application sets an upper limit for the size of the TCONV - buffer and optionally supplies a buffer. If no buffer is - supplied then one will be created by calling - malloc() when the pipeline is executed (when - necessary) and freed when the pipeline exits. The size of the - BKG buffer depends on the size of the TCONV buffer and if the - application supplies a BKG buffer it should be at least as large - as the TCONV buffer. The default size for these buffers is one - megabyte but the buffer might not be used to full capacity if - the buffer size is not an integer multiple of the source or - destination data point size (whichever is larger, but only - destination for the BKG buffer). - - - -

      Occasionally the destination data points will be partially - initialized and the H5Dread() operation should not - clobber those values. For instance, the destination type might - be a struct with members a and b where - a is already initialized and we're reading - b from the file. An extra line, G, is added to the - pipeline to provide the type conversion functions with the - existing data. - - Figure 2 - 

      It will most likely be quite common that no data type - conversion is necessary. In such cases a temporary buffer for - data type conversion is not needed and data space conversion - can happen in a single step. In fact, when the source and - destination data are both contiguous (they aren't in the - picture) the loop degenerates to a single iteration. - - - Figure 3 - -

      So far we've looked only at internal contiguous storage, but by - replacing Line B in Figures 1 and 2 and Line A in Figure 3 with - Figure 4 the pipeline is able to handle regularly chunked - objects. Line B of Figure 4 is executed once for each chunk - which contains data to be read and the chunk address is found by - looking at a multi-dimensional key in a chunk B-tree which has - one entry per chunk. - - Figure 4 - -

      If a single chunk is requested and the destination buffer is - the same size/shape as the chunk, then the CHUNK buffer is - bypassed and the destination buffer is used instead as shown in - Figure 5. - - Figure 5 - -


      -
      Robb Matzke
      - - -Last modified: Wed Mar 18 10:38:30 EST 1998 - - - diff --git a/doc/html/TechNotes/LibMaint.html b/doc/html/TechNotes/LibMaint.html deleted file mode 100644 index 718f085..0000000 --- a/doc/html/TechNotes/LibMaint.html +++ /dev/null @@ -1,128 +0,0 @@ - - - - -

      Information for HDF5 Maintainers

      - -
      -
      -* You can run make from any directory.  However, running in a
      -  subdirectory only knows how to build things in that directory and
      -  below.  However, all makefiles know when their target depends on
      -  something outside the local directory tree:
      -
      -	$ cd test
      -	$ make
      -	make: *** No rule to make target ../src/libhdf5.a
      -
      -* All Makefiles understand the following targets:
      -
      -        all              -- build locally.
      -        install          -- install libs, headers, progs.
      -        uninstall        -- remove installed files.
      -        mostlyclean      -- remove temp files (eg, *.o but not *.a).
      -        clean            -- mostlyclean plus libs and progs.
      -        distclean        -- all non-distributed files.
      -        maintainer-clean -- all derived files but H5config.h.in and configure.
      -
      -* Most Makefiles also understand:
      -
      -	TAGS		-- build a tags table
      -	dep, depend	-- recalculate source dependencies
      -	lib		-- build just the libraries w/o programs
      -
      -* If you have personal preferences for which make, compiler, compiler
      -  flags, preprocessor flags, etc., that you use and you don't want to
      -  set environment variables, then use a site configuration file.
      -
      -  When configure starts, it looks in the config directory for files
      -  whose name is some combination of the CPU name, vendor, and
      -  operating system in this order:
      -
      -	CPU-VENDOR-OS
      -	VENDOR-OS
      -	CPU-VENDOR
      -	OS
      -	VENDOR
      -	CPU
      -
      -  The first file which is found is sourced and can therefore affect
      -  the behavior of the rest of configure. See config/BlankForm for the
      -  template.
      -
      -* If you use GNU make along with gcc the Makefile will contain targets
      -  that automatically maintain a list of source interdependencies; you
      -  seldom have to say `make clean'.  I say `seldom' because if you
      -  change how one `*.h' file includes other `*.h' files you'll have
      -  to force an update.
      -
      -  To force an update of all dependency information remove the
      -  `.depend' file from each directory and type `make'.  For
      -  instance:
      -
      -	$ cd $HDF5_HOME
      -	$ find . -name .depend -exec rm {} \;
      -	$ make
      -
      -  If you're not using GNU make and gcc then dependencies come from
      -  ".distdep" files in each directory.  Those files are generated on
      -  GNU systems and inserted into the Makefile's by running
      -  config.status (which happens near the end of configure).
      -
      -* If you use GNU make along with gcc then the Perl script `trace' is
      -  run just before dependencies are calculated to update any H5TRACE()
      -  calls that might appear in the file.  Otherwise, after changing the
      -  type of a function (return type or argument types) one should run
      -  `trace' manually on those source files (e.g., ../bin/trace *.c).
      -
      -* Object files stay in the directory and are added to the library as a
      -  final step instead of placing the file in the library immediately
      -  and removing it from the directory.  The reason is three-fold:
      -
      -	1.  Most versions of make don't allow `$(LIB)($(SRC:.c=.o))'
      -	    which makes it necessary to have two lists of files, one
      -	    that ends with `.c' and the other that has the library
      -	    name wrapped around each `.o' file.
      -
      -	2.  Some versions of make/ar have problems with modification
      -	    times of archive members.
      -
      -	3.  Adding object files immediately causes problems on SMP
      -	    machines where make is doing more than one thing at a
      -	    time.
      -
      -* When using GNU make on an SMP you can cause it to compile more than
      -  one thing at a time.  At the top of the source tree invoke make as
      -
      -	$ make -j -l6
      -
      -  which causes make to fork as many children as possible as long as
      -  the load average doesn't go above 6.  In subdirectories one can say
      -
      -	$ make -j2
      -
      -  which limits the number of children to two (this doesn't work at the
      -  top level because the `-j2' is not passed to recursive makes).
      -
      -* To create a release tarball go to the top-level directory and run
      -  ./bin/release.  You can optionally supply one or more of the words
      -  `tar', `gzip', `bzip2' or `compress' on the command line.  The
-  result will be one or more (possibly compressed) tar files in the `releases'
      -  directory.  The README file is updated to contain the release date
      -  and version number.
      -
      -* To create a tarball of all the files which are part of HDF5 go to
      -  the top-level directory and type:
      -
      -      tar cvf foo.tar `grep '^\.' MANIFEST |unexpand |cut -f1`
      -
      -
      -===========================================
      -Last Modified:  15 October 1999 (technical content)
      -Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
      -HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
      -
      -
      - - - diff --git a/doc/html/TechNotes/Makefile.am b/doc/html/TechNotes/Makefile.am deleted file mode 100644 index a0aca2d..0000000 --- a/doc/html/TechNotes/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir = $(docdir)/hdf5/TechNotes - -# Public doc files (to be installed)... -localdoc_DATA=BigDataSmMach.html ChStudy_1000x1000.gif ChStudy_250x250.gif \ - ChStudy_499x499.gif ChStudy_5000x1000.gif ChStudy_500x500.gif \ - ChStudy_p1.gif ChunkingStudy.html CodeReview.html \ - ExternalFiles.html FreeLists.html H4-H5Compat.html HeapMgmt.html \ - IOPipe.html LibMaint.html MemoryMgmt.html MoveDStruct.html \ - NamingScheme.html ObjectHeader.html RawDStorage.html \ - SWControls.html SymbolTables.html ThreadSafeLibrary.html VFL.html \ - VFLfunc.html Version.html openmp-hdf5.c openmp-hdf5.html \ - pipe1.gif pipe2.gif pipe3.gif pipe4.gif pipe5.gif version.gif diff --git a/doc/html/TechNotes/Makefile.in b/doc/html/TechNotes/Makefile.in deleted file mode 100644 index 2dc4278..0000000 --- a/doc/html/TechNotes/Makefile.in +++ /dev/null @@ -1,494 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. 
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/TechNotes -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. -ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ 
-F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our 
tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/TechNotes - -# Public doc files (to be installed)... -localdoc_DATA = BigDataSmMach.html ChStudy_1000x1000.gif ChStudy_250x250.gif \ - ChStudy_499x499.gif ChStudy_5000x1000.gif ChStudy_500x500.gif \ - ChStudy_p1.gif ChunkingStudy.html CodeReview.html \ - ExternalFiles.html FreeLists.html H4-H5Compat.html HeapMgmt.html \ - IOPipe.html LibMaint.html MemoryMgmt.html MoveDStruct.html \ - NamingScheme.html ObjectHeader.html RawDStorage.html \ - SWControls.html SymbolTables.html ThreadSafeLibrary.html VFL.html \ - VFLfunc.html Version.html openmp-hdf5.c openmp-hdf5.html \ - pipe1.gif pipe2.gif pipe3.gif pipe4.gif pipe5.gif version.gif - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/TechNotes/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/TechNotes/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/TechNotes/MemoryMgmt.html b/doc/html/TechNotes/MemoryMgmt.html deleted file mode 100644 index 93782b5..0000000 --- a/doc/html/TechNotes/MemoryMgmt.html +++ /dev/null @@ -1,510 +0,0 @@ - - - - Memory Management in HDF5 - - - -

      Memory Management in HDF5

      - - -

      Is a Memory Manager Necessary?

      - -

      Some form of memory management may be necessary in HDF5 when - the various deletion operators are implemented so that the - file memory is not permanently orphaned. However, since an - HDF5 file was designed with persistent data in mind, the - importance of a memory manager is questionable. - -

      On the other hand, when certain meta data containers (file glue) - grow, they may need to be relocated in order to keep the - container contiguous. - -

      - Example: An object header consists of up to two - chunks of contiguous memory. The first chunk is a fixed - size at a fixed location when the header link count is - greater than one. Thus, inserting additional items into an - object header may require the second chunk to expand. When - this occurs, the second chunk may need to move to another - location in the file, freeing the file memory which that - chunk originally occupied. -
      - -

      The relocation of meta data containers could potentially - orphan a significant amount of file memory if the application - has made poor estimates for preallocation sizes. - - -

      Levels of Memory Management

      - -

      Memory management by the library can be independent of memory - management support by the file format. The file format can - support no memory management, some memory management, or full - memory management. Similarly with the library. - -

      Support in the Library

      - -
      -
      No Support: I -
      When memory is deallocated it simply becomes unreferenced - (orphaned) in the file. Memory allocation requests are - satisfied by extending the file. - -
      A separate off-line utility can be used to detect the - unreferenced bytes of a file and "bubble" them up to the end - of the file and then truncate the file. - -
      Some Support: II -
      The library could support partial memory management all - the time, or full memory management some of the time. - Orphaning free blocks instead of adding them to a free list - should not affect the file integrity, nor should fulfilling - new requests by extending the file instead of using the free - list. - -
      Full Support: III -
      The library supports space-efficient memory management by - always fulfilling allocation requests from the free list when - possible, and by coalescing adjacent free blocks into a - single larger free block. -
      - -

      Support in the File Format

      - -
      -
      No Support: A -
      The file format does not support memory management; any - unreferenced block in the file is assumed to be free. If - the library supports full memory management then it will - have to traverse the entire file to determine which blocks - are unreferenced. - -
      Some Support: B -
      Assuming that unreferenced blocks are free can be - dangerous in a situation where the file is not consistent. - For instance, if a directory tree becomes detached from the - main directory hierarchy, then the detached directory and - everything that is referenced only through the detached - directory become unreferenced. File repair utilities will - be unable to determine which unreferenced blocks need to be - linked back into the file hierarchy. - -
      Therefore, it might be useful to keep an unsorted, - doubly-linked list of free blocks in the file. The library - can add and remove blocks from the list in constant time, - and can generate its own internal free-block data structure - in time proportional to the number of free blocks instead of - the size of the file. Additionally, a library can use a - subset of the free blocks, an alternative which is not - feasible if the file format doesn't support any form of - memory management. - -
      Full Support: C -
      The file format can mirror library data structures for - space-efficient memory management. The free blocks are - linked in unsorted, doubly-linked lists with one list per - free block size. The heads of the lists are pointed to by a - B-tree whose nodes are sorted by free block size. At the - same time, all free blocks are the leaf nodes of another - B-tree sorted by starting and ending address. When the - trees are used in combination we can deallocate and allocate - memory in O(log N) time where N is the - number of free blocks. -
      - -

      Combinations of Library and File Format Support

      - -

      We now evaluate each combination of library support with file - support: - -

      -
      I-A -
      If neither the library nor the file support memory - management, then each allocation request will come from the - end of the file and each deallocation request is a no-op - that simply leaves the free block unreferenced. - -
        -
      • Advantages -
          -
        • No file overhead for allocation or deallocation. -
        • No library overhead for allocation or - deallocation. -
        • No file traversal required at time of open. -
        • No data needs to be written back to the file when - it's closed. -
        • Trivial to implement (already implemented). -
        - -
      • Disadvantages -
          -
        • Inefficient use of file space. -
        • A file repair utility must reclaim lost file space. -
        • Difficulties for file repair utilities. (Is an - unreferenced block a free block or orphaned data?) -
        -
      - -
      II-A -
      In order for the library to support memory management, it - will be required to build the internal free block - representation by traversing the entire file looking for - unreferenced blocks. - -
        -
      • Advantages -
          -
        • No file overhead for allocation or deallocation. -
        • Variable amount of library overhead for allocation - and deallocation depending on how much work the - library wants to do. -
        • No data needs to be written back to the file when - it's closed. -
        • Might use file space efficiently. -
        -
      • Disadvantages -
          -
        • Might use file space inefficiently. -
        • File traversal required at time of open. -
        • A file repair utility must reclaim lost file space. -
        • Difficulties for file repair utilities. -
        • Sharing of the free list between processes falls - outside the HDF5 file format documentation. -
        -
      - -
      III-A -
      In order for the library to support full memory - management, it will be required to build the internal free - block representation by traversing the entire file looking - for unreferenced blocks. - -
        -
      • Advantages -
          -
        • No file overhead for allocation or deallocation. -
        • Efficient use of file space. -
        • No data needs to be written back to the file when - it's closed. -
        -
      • Disadvantages -
          -
        • Moderate amount of library overhead for allocation - and deallocation. -
        • File traversal required at time of open. -
        • A file repair utility must reclaim lost file space. -
        • Difficulties for file repair utilities. -
        • Sharing of the free list between processes falls - outside the HDF5 file format documentation. -
        -
      - -
      I-B -
      If the library doesn't support memory management but the - file format supports some level of management, then a file - repair utility will have to be run occasionally to reclaim - unreferenced blocks. - -
        -
      • Advantages -
          -
        • No file overhead for allocation or deallocation. -
        • No library overhead for allocation or - deallocation. -
        • No file traversal required at time of open. -
        • No data needs to be written back to the file when - it's closed. -
        -
      • Disadvantages -
          -
        • A file repair utility must reclaim lost file space. -
        • Difficulties for file repair utilities. -
        -
      - -
      II-B -
      Both the library and the file format support some level - of memory management. - -
        -
      • Advantages -
          -
        • Constant file overhead per allocation or - deallocation. -
        • Variable library overhead per allocation or - deallocation depending on how much work the library - wants to do. -
        • Traversal at file open time is on the order of the - free list size instead of the file size. -
        • The library has the option of reading only part of - the free list. -
        • No data needs to be written at file close time if - it has been amortized into the cost of allocation - and deallocation. -
• File repair utilities don't have to be run to - reclaim memory.
        • File repair utilities can detect whether an - unreferenced block is a free block or orphaned data. -
        • Sharing of the free list between processes might - be easier. -
        • Possible efficient use of file space. -
        -
      • Disadvantages -
          -
        • Possible inefficient use of file space. -
        -
      - -
      III-B -
      The library provides space-efficient memory management but - the file format only supports an unsorted list of free - blocks. - -
        -
      • Advantages -
          -
        • Constant time file overhead per allocation or - deallocation. -
        • No data needs to be written at file close time if - it has been amortized into the cost of allocation - and deallocation. -
        • File repair utilities don't have to be run to - reclaim memory. -
        • File repair utilities can detect whether an - unreferenced block is a free block or orphaned data. -
        • Sharing of the free list between processes might - be easier. -
        • Efficient use of file space. -
        -
      • Disadvantages -
          -
        • O(log N) library overhead per allocation or - deallocation where N is the total number of - free blocks. -
        • O(N) time to open a file since the entire - free list must be read to construct the in-core - trees used by the library. -
        • Library is more complicated. -
        -
      - -
      I-C -
This has the same advantages and disadvantages as I-B with - the added disadvantage that the file format is much more - complicated. - -
      II-C -
      If the library only provides partial memory management but - the file requires full memory management, then this method - degenerates to the same as II-A with the added disadvantage - that the file format is much more complicated. - -
      III-C -
      The library and file format both provide complete data - structures for space-efficient memory management. - -
        -
      • Advantages -
          -
• Files can be opened in constant time since the - free list is read on demand and amortized into the - allocation and deallocation requests.
        • No data needs to be written back to the file when - it's closed. -
        • File repair utilities don't have to be run to - reclaim memory. -
        • File repair utilities can detect whether an - unreferenced block is a free block or orphaned data. -
        • Sharing the free list between processes is easy. -
        • Efficient use of file space. -
        -
      • Disadvantages -
          -
        • O(log N) file allocation and deallocation - cost where N is the total number of free - blocks. -
        • O(log N) library allocation and - deallocation cost. -
        • Much more complicated file format. -
        • More complicated library. -
        -
      - -
      - - -

      The Algorithm for II-B

      - -

      The file contains an unsorted, doubly-linked list of free - blocks. The address of the head of the list appears in the - boot block. Each free block contains the following fields: - -

      - - - - - - - - - - - - - - - - - - - - - -
      bytebytebytebyte
      Free Block Signature
      Total Free Block Size
      Address of Left Sibling
      Address of Right Sibling


      Remainder of Free Block


      -
      - -

The library reads as much of the free list as is convenient, whenever convenient, and pushes those entries onto stacks. This can occur when a file is opened or at any time during the life of the file. There is one stack for each free block size and the stacks are sorted by size in a balanced tree in memory. -

      Deallocation involves finding the correct stack or creating - a new one (an O(log K) operation where K is - the number of stacks), pushing the free block info onto the - stack (a constant-time operation), and inserting the free - block into the file free block list (a constant-time operation - which doesn't necessarily involve any I/O since the free blocks - can be cached like other objects). No attempt is made to - coalesce adjacent free blocks into larger blocks. - -

      Allocation involves finding the correct stack (an O(log - K) operation), removing the top item from the stack - (a constant-time operation), and removing the block from the - file free block list (a constant-time operation). If there is - no free block of the requested size or larger, then the file - is extended. - -
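      A minimal sketch of the structures this algorithm implies appears below.
      Only the fields of the on-disk free block (signature, size, sibling
      addresses) come from the table above; the type names, the dynamic-array
      stack, and the tree links are illustrative assumptions, and finding the
      right stack in the balanced tree is left out.

          #include <stddef.h>
          #include <stdint.h>
          #include <stdlib.h>

          typedef uint64_t file_addr_t;             /* file address; width is assumed */

          /* Mirrors the on-disk free-block layout shown in the table above. */
          typedef struct {
              char        signature[4];             /* free block signature            */
              uint64_t    size;                     /* total free block size           */
              file_addr_t left_sibling;             /* address of left sibling         */
              file_addr_t right_sibling;            /* address of right sibling        */
          } free_block_t;

          /* One stack of same-sized free blocks; the stacks themselves are kept
           * in a balanced tree keyed by block size (tree links only sketched). */
          typedef struct free_stack {
              uint64_t     size;                    /* block size this stack holds     */
              file_addr_t *addrs;                   /* file addresses of those blocks  */
              size_t       nused, nalloc;
              struct free_stack *smaller, *larger;  /* tree links, keyed by size       */
          } free_stack_t;

          /* Deallocation, once the right stack has been found or created (that
           * step is the O(log K) part): push the block's address in O(1) time. */
          static int push_free_block(free_stack_t *stk, file_addr_t addr)
          {
              if (stk->nused == stk->nalloc) {
                  size_t       n   = stk->nalloc ? 2 * stk->nalloc : 8;
                  file_addr_t *tmp = realloc(stk->addrs, n * sizeof *tmp);
                  if (tmp == NULL)
                      return -1;
                  stk->addrs  = tmp;
                  stk->nalloc = n;
              }
              stk->addrs[stk->nused++] = addr;
              return 0;
          }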

To provide sharability of the free list between processes, - the last step of an allocation will check for the free block - signature and, if it doesn't find one, will repeat the process. - Alternatively, a process can temporarily remove free blocks - from the file and hold them in its own private pool. - -

      To summarize... -

      -
      File opening -
      O(N) amortized over the time the file is open, - where N is the number of free blocks. The library - can still function without reading any of the file free - block list. - -
      Deallocation -
      O(log K) where K is the number of unique - sizes of free blocks. File access is constant. - -
      Allocation -
      O(log K). File access is constant. - -
      File closing -
      O(1) even if the library temporarily removes free - blocks from the file to hold them in a private pool since - the pool can still be a linked list on disk. -
      - - -

      The Algorithm for III-C

      - -

      The HDF5 file format supports a general B-tree mechanism - for storing data with keys. If we use a B-tree to represent - all parts of the file that are free and the B-tree is indexed - so that a free file chunk can be found if we know the starting - or ending address, then we can efficiently determine whether a - free chunk begins or ends at the specified address. Call this - the Address B-Tree. - -

      If a second B-tree points to a set of stacks where the - members of a particular stack are all free chunks of the same - size, and the tree is indexed by chunk size, then we can - efficiently find the best-fit chunk size for a memory request. - Call this the Size B-Tree. - -

      All free blocks of a particular size can be linked together - with an unsorted, doubly-linked, circular list and the left - and right sibling addresses can be stored within the free - chunk, allowing us to remove or insert items from the list in - constant time. - -

Deallocation of a block of file memory consists of: - -

        -
      1. Add the new free block whose address is ADDR to the - address B-tree. - -
          -
        1. If the address B-tree contains an entry for a free - block that ends at ADDR-1 then remove that - block from the B-tree and from the linked list (if the - block was the first on the list then the size B-tree - must be updated). Adjust the size and address of the - block being freed to include the block just removed from - the free list. The time required to search for and - possibly remove the left block is O(log N) - where N is the number of free blocks. - -
        2. If the address B-tree contains an entry for the free - block that begins at ADDR+LENGTH then - remove that block from the B-tree and from the linked - list (if the block was the first on the list then the - size B-tree must be updated). Adjust the size of the - block being freed to include the block just removed from - the free list. The time required to search for and - possibly remove the right block is O(log N). - -
        3. Add the new (adjusted) block to the address B-tree. - The time for this operation is O(log N). -
        - -
      2. Add the new block to the size B-tree and linked list. - -
          -
        1. If the size B-tree has an entry for this particular - size, then add the chunk to the tail of the list. This - is an O(log K) operation where K is - the number of unique free block sizes. - -
        2. Otherwise make a new entry in the B-tree for chunks of - this size. This is also O(log K). -
        -
      - -
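      The coalescing part of this deallocation can be sketched as follows. A
      list sorted by starting address stands in for the address B-tree (the
      real structure provides the O(log N) searches), the matching updates to
      the size B-tree and per-size lists are omitted, and all names are
      illustrative.

          #include <stdint.h>
          #include <stdlib.h>

          typedef uint64_t file_addr_t;

          /* Stand-in for the address B-tree: free extents kept sorted by
           * starting address so that neighbours are easy to find.          */
          typedef struct extent {
              file_addr_t addr;
              uint64_t    len;
              struct extent *next;
          } extent_t;

          /* Deallocate [addr, addr+len), merging with a left neighbour that
           * ends at addr and/or a right neighbour that begins at addr+len,
           * as in steps 1.1 through 1.3 above.                              */
          static int free_extent(extent_t **head, file_addr_t addr, uint64_t len)
          {
              extent_t *prev = NULL, *cur = *head;

              while (cur && cur->addr < addr) {        /* locate neighbours  */
                  prev = cur;
                  cur  = cur->next;
              }

              int merge_left  = prev && prev->addr + prev->len == addr;
              int merge_right = cur  && cur->addr == addr + len;

              if (merge_left && merge_right) {         /* bridge two blocks  */
                  prev->len += len + cur->len;
                  prev->next = cur->next;
                  free(cur);
              } else if (merge_left) {                 /* grow left block    */
                  prev->len += len;
              } else if (merge_right) {                /* grow right block   */
                  cur->addr = addr;
                  cur->len += len;
              } else {                                 /* isolated block     */
                  extent_t *e = malloc(sizeof *e);
                  if (e == NULL)
                      return -1;
                  e->addr = addr;
                  e->len  = len;
                  e->next = cur;
                  if (prev) prev->next = e;
                  else      *head = e;
              }
              return 0;
          }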

      Allocation is similar to deallocation. - -

      To summarize... - -

      -
      File opening -
      O(1) - -
      Deallocation -
      O(log N) where N is the total number of - free blocks. File access time is O(log N). - -
      Allocation -
      O(log N). File access time is O(log N). - -
      File closing -
      O(1). -
      - - -
      -
      Robb Matzke
      - - -Last modified: Thu Jul 31 14:41:01 EST - - - diff --git a/doc/html/TechNotes/MoveDStruct.html b/doc/html/TechNotes/MoveDStruct.html deleted file mode 100644 index 4576bd2..0000000 --- a/doc/html/TechNotes/MoveDStruct.html +++ /dev/null @@ -1,66 +0,0 @@ - - - - Relocating a File Data Structure - - - -

      Relocating a File Data Structure

      - -

      Since file data structures can be cached in memory by the H5AC - package it becomes problematic to move such a data structure in - the file. One cannot just copy a portion of the file from one - location to another because: - -

        -
      1. the file might not contain the latest information, and
      2. the H5AC package might not realize that the object's
         address has changed and attempt to write the object to disk
         at the old address.
      - -

      Here's a correct method to move data from one location to - another. The example code assumes that one is moving a B-link - tree node from old_addr to new_addr. - -

        -
      1. Make sure the disk is up-to-date with respect to the
         cache.  There is no need to remove the item from the cache,
         hence the final argument to H5AC_flush is FALSE.

             H5AC_flush (f, H5AC_BT, old_addr, FALSE);

      2. Read the data from the old address and write it to the new
         address.

             H5F_block_read (f, old_addr, size, buf);
             H5F_block_write (f, new_addr, size, buf);

      3. Notify the cache that the address of the object changed.

             H5AC_rename (f, H5AC_BT, old_addr, new_addr);
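      Taken together, the three steps amount to a small helper along these
      lines. The wrapper name, the file-structure and return types, and the
      error checks are assumptions layered on top of the calls shown above,
      so read it as a sketch rather than library code:

          static herr_t
          move_btree_node (H5F_t *f, haddr_t old_addr, haddr_t new_addr,
                           size_t size, void *buf)
          {
              /* 1. Bring the disk up to date; FALSE keeps the item cached. */
              if (H5AC_flush (f, H5AC_BT, old_addr, FALSE) < 0)
                  return -1;

              /* 2. Copy the raw bytes from the old address to the new one. */
              if (H5F_block_read (f, old_addr, size, buf) < 0 ||
                  H5F_block_write (f, new_addr, size, buf) < 0)
                  return -1;

              /* 3. Tell the cache that the object now lives at new_addr.   */
              return H5AC_rename (f, H5AC_BT, old_addr, new_addr);
          }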
      - - - -
      -
      Robb Matzke
      - - -Last modified: Mon Jul 14 15:38:29 EST - - - diff --git a/doc/html/TechNotes/NamingScheme.html b/doc/html/TechNotes/NamingScheme.html deleted file mode 100644 index dbf55bf..0000000 --- a/doc/html/TechNotes/NamingScheme.html +++ /dev/null @@ -1,300 +0,0 @@ - - - HDF5 Naming Scheme - - - - - -

      -
      HDF5 Naming Scheme for

      - -

      -

      -

      -

      - Authors: - Quincey Koziol and - - Robb Matzke - -
      -
        - - FILES - - -
          - -
        • Source files are named according to the package they contain (see - below). All files will begin with `H5' so we can stuff our - object files into someone else's library and not worry about file - name conflicts. -

          For Example: - -

          H5.c -- "Generic" library functions -
          -
          H5B.c -- B-link tree functions - -

          -

        • If a package is in more than one file, then another name is tacked - on. It's all lower case with no underscores or hyphens. -

          For Example: - -

          H5F.c -- the file for this package -
          -
          H5Fstdio.c -- stdio functions (just an example) -
          -
          H5Ffcntl.c -- fcntl functions (just an example) - -

          -

        • Each package file has a header file of API stuff (unless there is - no API component to the package) -

          For Example: - -

          H5F.h -- things an application would see. -

          - and a header file of private stuff - -

          -

          H5Fprivate.h -- things an application wouldn't see. The - private header includes the public header. - -

          - and a header for private prototypes - -

          -

          H5Fproto.h -- prototypes for internal functions. - -

          - By splitting the prototypes into separate include files we don't - have to recompile everything when just one function prototype - changes. - -

        • The main API header file is `hdf5.h' and it includes each of the - public header files but none of the private header files. Or the - application can include just the public header files it needs. - -
        • There is no main private or prototype header file because it - prevents make from being efficient. Instead, each source file - includes only the private header and prototype files it needs - (first all the private headers, then all the private prototypes). - -
        • Header files should include everything they need and nothing more. - -
        -

        - - PACKAGES - - -

        -Names exported beyond function scope begin with `H5' followed by zero, -one, or two upper-case letters that describe the class of object. -This prefix is the package name. The implementation of packages -doesn't necessarily have to map 1:1 to the source files. -

        - -

        H5 -- library functions -
        -
        H5A -- atoms -
        -
        H5AC -- cache -
        -
        H5B -- B-link trees -
        -
        H5D -- datasets -
        -
        H5E -- error handling -
        -
        H5F -- files -
        -
        H5G -- groups -
        -
        H5M -- meta data -
        -
        H5MM -- core memory management -
        -
        H5MF -- file memory management -
        -
        H5O -- object headers -
        -
        H5P -- Property Lists -
        -
        H5S -- dataspaces -
        -
        H5R -- relationships -
        -
        H5T -- datatype - -

        -Each package implements a single main class of object (e.g., the H5B -package implements B-link trees). The main data type of a package is -the package name followed by `_t'. -

        - -

        H5F_t -- HDF5 file type -
        -
        H5B_t -- B-link tree data type - -

        - -Not all packages implement a data type (H5, H5MF) and some -packages provide access to a preexisting data type (H5MM, H5S). -

        - - - PUBLIC vs PRIVATE - -

        -If the symbol is for internal use only, then the package name is -followed by an underscore and the rest of the name. Otherwise, the -symbol is part of the API and there is no underscore between the -package name and the rest of the name. -

        - -

        H5Fopen -- an API function. -
        -
        H5B_find -- an internal function. - -

-For functions, this is important because the API functions never pass -pointers around (they use atoms instead for hiding the implementation) -and they perform stringent checks on their arguments. Internal -functions, on the other hand, check arguments with assert().

        -Data types like H5B_t carry no information about whether the type is -public or private since it doesn't matter. - -
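        The two conventions side by side, as a made-up illustration (the
        `_example' suffix marks both functions as invented; only the naming
        and argument-checking styles are the point):

            #include <assert.h>
            #include <stddef.h>

            /* API style: no underscore after the package prefix; arguments
             * are checked explicitly and errors go back to the application.
             * The int return stands in for an hid_t atom.                  */
            int H5Fopen_example(const char *name, unsigned flags)
            {
                (void)flags;
                if (name == NULL || name[0] == '\0')
                    return -1;          /* invalid argument reported to caller */
                /* ... open the file, return an atom for it ... */
                return 0;
            }

            /* Internal style: underscore after the package prefix; arguments
             * are checked with assert() because callers are inside the
             * library and pointers may be passed freely.                   */
            static void *H5B_find_example(void *tree, const void *key)
            {
                assert(tree != NULL);
                assert(key != NULL);
                /* ... search the B-link tree ... */
                return NULL;
            }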

        - - - INTEGRAL TYPES - -

        -Integral fixed-point type names are an optional `u' followed by `int' -followed by the size in bits (8, 16, -32, or 64). There is no trailing `_t' because these are common -enough and follow their own naming convention. -

        -

        -
        hbool_t -- boolean values (BTRUE, BFALSE, BFAIL) -
        -
        int8 -- signed 8-bit integers -
        -
        uint8 -- unsigned 8-bit integers -
        -
        int16 -- signed 16-bit integers -
        -
        uint16 -- unsigned 16-bit integers -
        -
        int32 -- signed 32-bit integers -
        -
        uint32 -- unsigned 32-bit integers -
        -
        int64 -- signed 64-bit integers -
        -
        uint64 -- unsigned 64-bit integers -
        -
        intn -- "native" integers -
        -
        uintn -- "native" unsigned integers - -

        -

        - - OTHER TYPES - - -

        - -Other data types are always followed by `_t'. -

        -

        -
        H5B_key_t-- additional data type used by H5B package. -

        -

        - -However, if the name is so common that it's used almost everywhere, -then we make an alias for it by removing the package name and leading -underscore and replacing it with an `h' (the main datatype for a -package already has a short enough name, so we don't have aliases for -them). -

        -

        -
        typedef H5E_err_t herr_t; -

        -

        - - GLOBAL VARIABLES - -

        -Global variables include the package name and end with `_g'. -

        -

        -
        H5AC_methods_g -- global variable in the H5AC package. -

        -

        - - - - -MACROS, PREPROCESSOR CONSTANTS, AND ENUM MEMBERS - - -

        -Same rules as other symbols except the name is all upper case. There -are a few exceptions:
        -

          -
        • Constants and macros defined on a system that is deficient: -

          -
          MIN(x,y), MAX(x,y) and their relatives -

          - -
        • Platform constants : -

          - No naming scheme; determined by OS and compiler.
          - These appear only in one header file anyway. -

          -

        • Feature test constants (?)
          - Always start with `HDF5_HAVE_' like HDF5_HAVE_STDARG_H for a - header file, or HDF5_HAVE_DEV_T for a data type, or - HDF5_HAVE_DIV for a function. -
        -

        - -

      -

      -

      -
      - This file /hdf3/web/hdf/internal/HDF_standard/HDF5.coding_standard.html is - maintained by Elena Pourmal - epourmal@ncsa.uiuc.edu . -
      -

      -

      - Last modified August 5, 1997 -
      - -
      - - - diff --git a/doc/html/TechNotes/ObjectHeader.html b/doc/html/TechNotes/ObjectHeader.html deleted file mode 100644 index 1335d23..0000000 --- a/doc/html/TechNotes/ObjectHeader.html +++ /dev/null @@ -1,72 +0,0 @@ - - - -

      Object Headers

      - -
      -
      -haddr_t
      -H5O_new (hdf5_file_t *f, intn nrefs, size_t size_hint)
      -
      -	Creates a new empty object header and returns its address.
      -	The SIZE_HINT is the initial size of the data portion of the
      -	object header and NREFS is the number of symbol table entries
      -	that reference this object header (normally one).
      -
      -	If SIZE_HINT is too small, then at least some default amount
      -	of space is allocated for the object header.
      -
      -intn				        /*num remaining links		*/
      -H5O_link (hdf5_file_t *f,		/*file containing header	*/
      -	  haddr_t addr,			/*header file address		*/
      -	  intn adjust)			/*link adjustment amount	*/
      -
      -
      -size_t
      -H5O_sizeof (hdf5_file_t *f,		/*file containing header	*/
      -	    haddr_t addr,		/*header file address		*/
      -            H5O_class_t *type,		/*message type or H5O_ANY	*/
      -	    intn sequence)		/*sequence number, usually zero	*/
      -		
      -	Returns the size of a particular instance of a message in an
      -	object header.  When an object header has more than one
      -	instance of a particular message type, then SEQUENCE indicates
      -	which instance to return.
      -
      -void *
      -H5O_read (hdf5_file_t *f,		/*file containing header	*/
      -	  haddr_t addr,			/*header file address		*/
      -	  H5G_entry_t *ent,		/*optional symbol table entry	*/
      -	  H5O_class_t *type,		/*message type or H5O_ANY	*/
      -	  intn sequence,		/*sequence number, usually zero	*/
      -	  size_t size,			/*size of output message	*/
      -	  void *mesg)			/*output buffer			*/
      -
      -	Reads a message from the object header into memory.
      -
      -const void *
      -H5O_peek (hdf5_file_t *f,		/*file containing header	*/
      -          haddr_t addr,			/*header file address		*/
      -	  H5G_entry_t *ent,		/*optional symbol table entry	*/
      -	  H5O_class_t *type,		/*type of message or H5O_ANY	*/
      -	  intn sequence)		/*sequence number, usually zero	*/
      -
      -haddr_t					/*new heap address		*/
      -H5O_modify (hdf5_file_t *f,		/*file containing header	*/
      -            haddr_t addr,		/*header file address		*/
      -	    H5G_entry_t *ent,		/*optional symbol table entry	*/
      -	    hbool_t *ent_modified,	/*entry modification flag	*/
      -	    H5O_class_t *type,		/*message type			*/
      -	    intn overwrite,		/*sequence number or -1		*/
      -	    void *mesg)			/*the message			*/  
      -	  
      -
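For orientation only, a hypothetical calling sequence built from the
prototypes above might read as follows; the file pointer, the size hint,
and the link adjustments are invented values and error checking is omitted:

	hdf5_file_t *f = ...;                   /* an open file                */

	/* Create an empty header referenced by one symbol table entry and
	   give it roughly 64 bytes of initial message space.                */
	haddr_t oh_addr = H5O_new (f, 1, 64);

	/* A second hard link raises the reference count by one; H5O_link
	   returns the number of remaining links.                            */
	intn nlinks = H5O_link (f, oh_addr, +1);

	/* Dropping both links returns the count to zero.                    */
	nlinks = H5O_link (f, oh_addr, -2);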
      -===========================================
      -Last Modified:  8 July 1998 (technical content)
      -Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
      -HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
      -
      -
      - - - diff --git a/doc/html/TechNotes/RawDStorage.html b/doc/html/TechNotes/RawDStorage.html deleted file mode 100644 index 87ea54d..0000000 --- a/doc/html/TechNotes/RawDStorage.html +++ /dev/null @@ -1,274 +0,0 @@ - - - - Raw Data Storage in HDF5 - - - -

      Raw Data Storage in HDF5

      - -

      This document describes the various ways that raw data is - stored in an HDF5 file and the object header messages which - contain the parameters for the storage. - -

      Raw data storage has three components: the mapping from some - logical multi-dimensional element space to the linear address - space of a file, compression of the raw data on disk, and - striping of raw data across multiple files. These components - are orthogonal. - -

Some goals of the storage mechanism are to be able to - efficiently store data which is: - -

      -
      Small -
      Small pieces of raw data can be treated as meta data and - stored in the object header. This will be achieved by storing - the raw data in the object header with message 0x0006. - Compression and striping are not supported in this case. - -
      Complete Large -
      The library should be able to store large arrays - contiguously in the file provided the user knows the final - array size a priori. The array can then be read/written in a - single I/O request. This is accomplished by describing the - storage with object header message 0x0005. Compression and - striping are not supported in this case. - -
      Sparse Large -
      A large sparse raw data array should be stored in a manner - that is space-efficient but one in which any element can still - be accessed in a reasonable amount of time. Implementation - details are below. - -
      Dynamic Size -
      One often doesn't have prior knowledge of the size of an - array. It would be nice to allow arrays to grow dynamically in - any dimension. It might also be nice to allow the array to - grow in the negative dimension directions if convenient to - implement. Implementation details are below. - -
      Subslab Access -
Some multi-dimensional arrays are almost always accessed by - subslabs. For instance, a 2-d array of pixels might always be - accessed as smaller 1k-by-1k 2-d arrays always aligned on 1k - index values. We should be able to store the array in such a - way that striding through the entire array is not necessary. - Subslab access might also be useful with compression - algorithms where each storage slab can be compressed - independently of the others. Implementation details are below.
      Compressed -
      Various compression algorithms can be applied to the entire - array. We're not planning to support separate algorithms (or a - single algorithm with separate parameters) for each chunk - although it would be possible to implement that in a manner - similar to the way striping across files is - implemented. - -
      Striped Across Files -
      The array access functions should support arrays stored - discontiguously across a set of files. -
      - -

      Implementation of Indexed Storage

      - -

      The Sparse Large, Dynamic Size, and Subslab Access methods - share so much code that they can be described with a single - message. The new Indexed Storage Message (0x0008) - will replace the old Chunked Object (0x0009) and - Sparse Object (0x000A) Messages. - -

      -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      - The Format of the Indexed Storage Message -
      bytebytebytebyte

      Address of B-tree

      Number of DimensionsReservedReservedReserved
      Reserved (4 bytes)
      Alignment for Dimension 0 (4 bytes)
      Alignment for Dimension 1 (4 bytes)
      ...
      Alignment for Dimension N (4 bytes)
      -
      - -

      The alignment fields indicate the alignment in logical space to - use when allocating new storage areas on disk. For instance, - writing every other element of a 100-element one-dimensional - array (using one HDF5 I/O partial write operation per element) - that has unit storage alignment would result in 50 - single-element, discontiguous storage segments. However, using - an alignment of 25 would result in only four discontiguous - segments. The size of the message varies with the number of - dimensions. - -
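      For reference, an in-memory mirror of this message might be declared as
      below; the struct and field names and the dimension limit are invented,
      and only the field meanings and sizes come from the table:

          #include <stdint.h>

          #define SKETCH_MAX_DIMS 32               /* illustrative upper bound      */

          typedef struct {
              uint64_t btree_addr;                 /* Address of B-tree (width of a
                                                      file address is assumed)      */
              uint8_t  ndims;                      /* Number of Dimensions          */
              uint32_t alignment[SKETCH_MAX_DIMS]; /* Alignment for Dimension 0..N,
                                                      in elements of logical space  */
          } indexed_storage_msg_t;

          /* The encoded size therefore grows with dimensionality: the fixed
           * fields plus four bytes of alignment per dimension.              */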

A B-tree is used to point to the discontiguous portions of - storage which have been allocated for the object. All keys of a - particular B-tree are the same size and are a function of the - number of dimensions. It is therefore not possible to change the - dimensionality of an indexed storage array after its B-tree is - created.

      -

      - - - - - - - - - - - - - - - - - - - - - - - - -
      - The Format of a B-Tree Key -
      bytebytebytebyte
      External File Number or Zero (4 bytes)
      Chunk Offset in Dimension 0 (4 bytes)
      Chunk Offset in Dimension 1 (4 bytes)
      ...
      Chunk Offset in Dimension N (4 bytes)
      -
      - -

      The keys within a B-tree obey an ordering based on the chunk - offsets. If the offsets in dimension-0 are equal, then - dimension-1 is used, etc. The External File Number field - contains a 1-origin offset into the External File List message - which contains the name of the external file in which that chunk - is stored. - -
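      The ordering can be made concrete with a small sketch; the names and the
      dimension limit are invented, and the field layout follows the key
      format above:

          #include <stdint.h>

          #define KEY_SKETCH_MAX_DIMS 32             /* illustrative upper bound       */

          typedef struct {
              uint32_t external_file;                /* external file number, or zero  */
              uint32_t offset[KEY_SKETCH_MAX_DIMS];  /* chunk offset in each dimension */
          } chunk_key_t;

          /* Compare two keys of the same dimensionality: dimension 0 decides
           * first, then dimension 1, and so on, as described in the text.   */
          static int chunk_key_cmp(const chunk_key_t *a, const chunk_key_t *b,
                                   int ndims)
          {
              for (int d = 0; d < ndims; d++) {
                  if (a->offset[d] < b->offset[d]) return -1;
                  if (a->offset[d] > b->offset[d]) return  1;
              }
              return 0;
          }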

      Implementation of Striping

      - -

      The indexed storage will support arbitrary striping at the - chunk level; each chunk can be stored in any file. This is - accomplished by using the External File Number field of an - indexed storage B-tree key as a 1-origin offset into an External - File List Message (0x0009) which takes the form: - -

      -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      - The Format of the External File List Message -
      bytebytebytebyte

      Name Heap Address

      Number of Slots Allocated (4 bytes)
      Number of File Names (4 bytes)
      Byte Offset of Name 1 in Heap (4 bytes)
      Byte Offset of Name 2 in Heap (4 bytes)
      ...

      Unused Slot(s)

      -
      - -

Each indexed storage array that has all or part of its data - stored in external files will contain a single external file - list message. The size of the message is determined when it - is created, but it may be possible to enlarge the - message on demand by moving it. At this time, it's not possible - for multiple arrays to share a single external file list - message.

      -
      - H5O_efl_t *H5O_efl_new (H5G_entry_t *object, intn - nslots_hint, intn heap_size_hint) - -
      Adds a new, empty external file list message to an object - header and returns a pointer to that message. The message - acts as a cache for file descriptors of external files that - are open. - -

      - intn H5O_efl_index (H5O_efl_t *efl, const char *filename) - -
      Gets the external file index number for a particular file name. - If the name isn't in the external file list then it's added to - the H5O_efl_t struct and immediately written to the object - header to which the external file list message belongs. Name - comparison is textual. Each name should be relative to the - directory which contains the HDF5 file. - -

      - H5F_low_t *H5O_efl_open (H5O_efl_t *efl, intn index, uintn mode) - -
      Gets a low-level file descriptor for an external file. The - external file list caches file descriptors because we might - have many more external files than there are file descriptors - available to this process. The caller should not close this file. - -

      - herr_t H5O_efl_release (H5O_efl_t *efl) - -
Releases an external file list, closes all files - associated with that list, and, if the list has been modified - since the call to H5O_efl_new, flushes the message - to disk. (A usage sketch of these routines follows below.) -
      - -
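Putting the four routines above together, a caller might use an external file list roughly as shown below. This is only an illustrative sketch of the internal design described in this note: the types and functions are 1997-era library internals rather than a public API, the file name is a placeholder, and error checking is omitted.

    /* Sketch only: uses the internal routines and types named above. */
    static void efl_example(H5G_entry_t *object)
    {
        H5O_efl_t *efl;
        H5F_low_t *lf;
        intn       idx;

        /* Add an empty external file list message to the object header. */
        efl = H5O_efl_new(object, 8 /*nslots_hint*/, 256 /*heap_size_hint*/);

        /* Look up (or insert) a file name; the result is a 1-origin index
         * suitable for the External File Number field of a B-tree key.   */
        idx = H5O_efl_index(efl, "chunks0.raw");

        /* Get a cached low-level descriptor; the caller must not close it. */
        lf = H5O_efl_open(efl, idx, 0 /*mode*/);
        (void)lf;   /* ... raw chunk I/O would go through lf here ... */

        /* Close all cached files and flush the message if it was modified. */
        H5O_efl_release(efl);
    }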
      -
      Robb Matzke
      - - -Last modified: Tue Nov 25 12:36:50 EST 1997 - - - diff --git a/doc/html/TechNotes/ReservedFileSpace.html b/doc/html/TechNotes/ReservedFileSpace.html deleted file mode 100644 index 22e3614..0000000 --- a/doc/html/TechNotes/ReservedFileSpace.html +++ /dev/null @@ -1,29 +0,0 @@ - - - Reserved File Address Space - - - - Reserved File Address Space - -

      HDF5 files use 8-byte addresses by default, but users can change this to 2, 4, or even 16 bytes. This means that it is possible to have files that only address 64 KB of space, and thus that HDF must handle the case of files that have enough space on disk but not enough internal address space to be written.

      - -

      Thus, every time space is allocated in a file, HDF needs to check that this allocation is within the file’s address space. If not, HDF should output an error and ensure that all the data currently in the file (everything that is still addressable) is successfully written to disk.

      - -

      Unfortunately, some structures are stored in memory and do not allocate space for themselves until the file is actually flushed to disk (object headers and the local heap). This is good for efficiency, since these structures can grow without creating the fragmentation that would result from frequent allocation and deallocation, but means that if the library runs out of addressable space while allocating memory, these structures will not be present in the file. Without them, HDF5 does not know how to parse the data in the file, rendering it unreadable.

      - -

      Thus, HDF keeps track of the space “reserved for allocation” in the file (H5FD_t struct). When a function tries to allocate space in the file, it first checks that the allocation would not overflow the address space, taking the reserved space into account. When object headers or the heap finally allocate the space they have reserved, they free the reserved space before allocating file space.
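The check can be pictured with a small sketch. The code below is not the library's actual H5FD code; the struct and function names are hypothetical, but the logic follows the description above: an allocation succeeds only if it fits under the address limit once reserved space is counted, and a reservation is given back just before the reserving structure finally allocates its file space.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical per-file bookkeeping. */
    typedef struct file_space_t {
        uint64_t eoa;        /* current end of allocated addresses            */
        uint64_t maxaddr;    /* largest address representable in this file    */
        uint64_t reserved;   /* space promised to object headers / local heap */
    } file_space_t;

    /* Allocate `size` bytes at the end of the file only if, together with
     * the reserved space, the request still fits in the address space.  */
    static bool alloc_file_space(file_space_t *fs, uint64_t size, uint64_t *addr)
    {
        if (fs->eoa + fs->reserved + size > fs->maxaddr)
            return false;          /* would overflow the file's address space */
        *addr = fs->eoa;
        fs->eoa += size;
        return true;
    }

    /* Called when an object header or the heap allocates space it had
     * previously reserved: release the reservation first.             */
    static void release_reservation(file_space_t *fs, uint64_t size)
    {
        fs->reserved = (size <= fs->reserved) ? (fs->reserved - size) : 0;
    }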

      - -

      A given object header is only flushed to disk once, but the heap can be flushed to disk multiple times over the life of the file and will require contiguous space every time. To handle this, the heap keeps track of how much space it has reserved. This allows it to reserve space only when it grows (when it is dirty and needs to be re-written to disk).

      - -

      For instance, if the heap is flushed to disk, it frees its reserved space. If new data is inserted into the heap in memory, the heap may need to flush to disk again in a new, larger section of memory. Thus, not only does it reserve space in the file for this new data, but also for all of the previously-existing data in the heap to be re-written. The next insert, however, will only need to reserve space for its new data, since the rest of the heap already has space reserved for it.

      - -

      Potential issues: -

        -
1. This system does not take into account deleted space. Deleted space can still be allocated as usual, but "reserved" space is always taken off the end of a file. This means that a file that was filled to capacity but then had a significant number of objects deleted will still throw errors if more data is written. This occurs because the file's free space is in the middle of the file, not at the end. A more complete space-reservation system would test if the reserved data can fit into the file's free list, but this would be significantly more complicated to implement.
      2. - -
      3. HDF5 currently claims to support 16-byte addresses, but a number of platforms do not support 16-byte integers, so addresses of this size cannot be represented in memory. This solution does not attempt to address this issue.
      4. -
      -

      - - diff --git a/doc/html/TechNotes/SWControls.html b/doc/html/TechNotes/SWControls.html deleted file mode 100755 index 8dac1ce..0000000 --- a/doc/html/TechNotes/SWControls.html +++ /dev/null @@ -1,96 +0,0 @@ - - - - - -HDF5 Software Controls - - - - - -

      HDF5 Software Controls

      - -

      -(Work in progress draft) -

      - -

-A description of the knobs and controls, such as environment variables and -settings, that control the functionality of the HDF5 libraries and tools. This is -intended for HDF5 library and tool developers. HDF5 application users -may consult the document A guide to -debugging HDF5 API calls. -

      - -

      -

      Library Building Controls

      -

      - -

      -

      Environment variables

      -

      - -
      -
      CC
      -
      Used by configure. Override the default C compiler.
      -
      LIBS
      -
      Used by configure. Add more libraries to be used.
      -
      NP
      -
Number of MPI processes to invoke for testing. Defaults to 2.
      -
      HDF5_NOCLEANUP
      -
      Used by most test programs. When set, temporary files - created during tests are NOT removed. Default is to remove them - by the end of each test. Note that the variable value does not - matter. E.g., the values of "yes", "no" and "" all have the same - effect, that is, NO cleanup.
      -
      H5FD_mpio_Debug
      -
Used by the MPIO file driver for debugging. Requires the - H5FDmpio_DEBUG macro to be defined at compile time. Should be set to a - string to turn on various tracing. Valid values (case matters) - are: -
      -
      t
      -
Trace all routines
      -
      r
      -
      Trace read routines
      -
      w
      -
      Trace write routines
      -
      c
      -
      Show result of MPI_Get_count
      -
      -
      -
      HDF5_MPI_OPT_TYPES
      -
Used by the MPIO file driver to control the use of the optimized - MPI input/output routines. 0 turns it off; 1 turns it on (uses - optimized code when it can).
      -
      - -

      -

      Compile Macros

      -

      - -
      -
      H5FDmpio_DEBUG
      -
Compile-time macro. Compiles in the MPIO file driver's debugging - statements. Defined if the H5F_DEBUG macro is defined.
      -
      H5FD_mpio_Debug
      -
Compiles in the debugging code used by the MPIO file driver. Requires the - H5FDmpio_DEBUG macro to be defined at compile time. Should be set to a - string to turn on the tracing. See the environment variable - H5FD_mpio_Debug above for valid values.
      -
      - -

      -


      -

      - -
      -HDF Help Desk -
      - -

      Last -modified: December 11, 2000

      - - - diff --git a/doc/html/TechNotes/SymbolTables.html b/doc/html/TechNotes/SymbolTables.html deleted file mode 100644 index 05ee538..0000000 --- a/doc/html/TechNotes/SymbolTables.html +++ /dev/null @@ -1,329 +0,0 @@ - - - -

      Symbol Table Caching Issues

      - -
      -
      -A number of issues involving caching of object header messages in
      -symbol table entries must be resolved.
      -
      -What is the motivation for these changes?
      -
      -   If we make objects completely independent of object name it allows
      -   us to refer to one object by multiple names (a concept called hard
      -   links in Unix file systems), which in turn provides an easy way to
      -   share data between datasets.
      -
      -   Every object in an HDF5 file has a unique, constant object header
      -   address which serves as a handle (or OID) for the object.  The
      -   object header contains messages which describe the object.
      -
      -   HDF5 allows some of the object header messages to be cached in
      -   symbol table entries so that the object header doesn't have to be
      -   read from disk.  For instance, an entry for a directory caches the
      -   directory disk addresses required to access that directory, so the
      -   object header for that directory is seldom read.
      -
      -   If an object has multiple names (that is, a link count greater than
      -   one), then it has multiple symbol table entries which point to it.
      -   All symbol table entries must agree on header messages.  The
      -   current mechanism is to turn off the caching of header messages in
      -   symbol table entries when the header link count is more than one,
      -   and to allow caching once the link count returns to one.
      -
      -   However, in the current implementation, a package is allowed to
      -   copy a symbol table entry and use it as a private cache for the
      -   object header.  This doesn't work for a number of reasons (all but
      -   one require a `delete symbol entry' operation).
      -
      -      1. If two packages hold copies of the same symbol table entry,
      -         they don't notify each other of changes to the symbol table
      -         entry. Eventually, one package reads a cached message and
      -         gets the wrong value because the other package changed the
      -         message in the object header.
      -
      -      2. If one package holds a copy of the symbol table entry and
      -         some other part of HDF5 removes the object and replaces it
      -         with some other object, then the original package will
      -         continue to access the non-existent object using the new
      -         object header.
      -
      -      3. If one package holds a copy of the symbol table entry and
      -         some other part of HDF5 (re)moves the directory which
      -         contains the object, then the package will be unable to
      -         update the symbol table entry with the new cached
      -         data. Packages that refer to the object by the new name will
      -         use old cached data.
      -
      -
      -The basic problem is that there may be multiple copies of the object
      -symbol table entry floating around in the code when there should
      -really be at most one per hard link.
      -
      -   Level 0: A copy may exist on disk as part of a symbol table node, which
      -            is a small 1d array of symbol table entries.
      -
      -   Level 1: A copy may be cached in memory as part of a symbol table node
      -	    in the H5Gnode.c file by the H5AC layer.
      -
      -   Level 2a: Another package may be holding a copy so it can perform
      -   	     fast lookup of any header messages that might be cached in
      -   	     the symbol table entry.  It can't point directly to the
-            cached symbol table node because that node can disappear
      -   	     at any time.
      -
      -   Level 2b: Packages may hold more than one copy of a symbol table
      -             entry.  For instance, if H5D_open() is called twice for
      -             the same name, then two copies of the symbol table entry
      -             for the dataset exist in the H5D package.
      -
      -How can level 2a and 2b be combined?
      -
      -   If package data structures contained pointers to symbol table
      -   entries instead of copies of symbol table entries and if H5G
      -   allocated one symbol table entry per hard link, then it's trivial
      -   for Level 2a and 2b to benefit from one another's actions since
      -   they share the same cache.
      -
      -How does this work conceptually?
      -
      -   Level 2a and 2b must notify Level 1 of their intent to use (or stop
      -   using) a symbol table entry to access an object header.  The
      -   notification of the intent to access an object header is called
      -   `opening' the object and releasing the access is `closing' the
      -   object.
      -
      -   Opening an object requires an object name which is used to locate
      -   the symbol table entry to use for caching of object header
      -   messages.  The return value is a handle for the object.  Figure 1
      -   shows the state after Dataset1 opens Object with a name that maps
      -   through Entry1.  The open request created a copy of Entry1 called
      -   Shadow1 which exists even if SymNode1 is preempted from the H5AC
      -   layer.
      -
      -                                                     ______
      -                                            Object  /      \
      -	     SymNode1                     +--------+        |
      -	    +--------+            _____\  | Header |        |
      -	    |        |           /     /  +--------+        |
      -	    +--------+ +---------+                  \______/
      -	    | Entry1 | | Shadow1 | /____
      -	    +--------+ +---------+ \    \
      -	    :        :                   \
      -	    +--------+                    +----------+
      -					  | Dataset1 |
      -					  +----------+
      -			     FIGURE 1
      -
      -
      -
      -  The SymNode1 can appear and disappear from the H5AC layer at any
      -  time without affecting the Object Header data cached in the Shadow.
      -  The rules are:
      -
      -  * If the SymNode1 is present and is about to disappear and the
      -    Shadow1 dirty bit is set, then Shadow1 is copied over Entry1, the
      -    Entry1 dirty bit is set, and the Shadow1 dirty bit is cleared.
      -
      -  * If something requests a copy of Entry1 (for a read-only peek
      -    request), and Shadow1 exists, then a copy (not pointer) of Shadow1
      -    is returned instead.
      -
      -  * Entry1 cannot be deleted while Shadow1 exists.
      -
      -  * Entry1 cannot change directly if Shadow1 exists since this means
      -    that some other package has opened the object and may be modifying
      -    it.  I haven't decided if it's useful to ever change Entry1
      -    directly (except of course within the H5G layer itself).
      -
      -  * Shadow1 is created when Dataset1 `opens' the object through
      -    Entry1. Dataset1 is given a pointer to Shadow1 and Shadow1's
      -    reference count is incremented.
      -
      -  * When Dataset1 `closes' the Object the Shadow1 reference count is
      -    decremented.  When the reference count reaches zero, if the
      -    Shadow1 dirty bit is set, then Shadow1's contents are copied to
      -    Entry1, and the Entry1 dirty bit is set. Shadow1 is then deleted
      -    if its reference count is zero.  This may require reading SymNode1
      -    back into the H5AC layer.
      -
      -What happens when another Dataset opens the Object through Entry1?
      -
      -  If the current state is represented by the top part of Figure 2,
      -  then Dataset2 will be given a pointer to Shadow1 and the Shadow1
      -  reference count will be incremented to two.  The Object header link
      -  count remains at one so Object Header messages continue to be cached
-  by Shadow1. Dataset1 and Dataset2 benefit from one another's
      -  actions. The resulting state is represented by Figure 2.
      -
      -                                                     _____
      -             SymNode1                       Object  /     \
      -            +--------+            _____\  +--------+       |
      -            |        |           /     /  | Header |       |
      -            +--------+ +---------+        +--------+       |
      -            | Entry1 | | Shadow1 | /____            \_____/
      -            +--------+ +---------+ \    \
      -            :        :        _          \
      -            +--------+       |\           +----------+
      -                               \          | Dataset1 |
      -                                \________ +----------+
      -                                         \              \
      -                                          +----------+   |
      -                                          | Dataset2 |   |- New Dataset
      -                                          +----------+   |
      -                                                        /
      -			     FIGURE 2
      -
      -
      -What happens when the link count for Object increases while Dataset
      -has the Object open?
      -
      -                                                     SymNode2
      -                                                    +--------+
      -    SymNode1                       Object           |        |
      -   +--------+             ____\  +--------+ /______ +--------+
      -   |        |            /    /  | header | \      `| Entry2 |
      -   +--------+ +---------+        +--------+         +--------+
      -   | Entry1 | | Shadow1 | /____                     :        :
      -   +--------+ +---------+ \    \                    +--------+
      -   :        :                   \
      -   +--------+                    +----------+   \________________/
      -                                 | Dataset1 |            |
      -                                 +----------+         New Link
      -
      -			     FIGURE 3
      -
      -  The current state is represented by the left part of Figure 3.  To
      -  create a new link the Object Header had to be located by traversing
      -  through Entry1/Shadow1.  On the way through, the Entry1/Shadow1 
      -  cache is invalidated and the Object Header link count is
      -  incremented. Entry2 is then added to SymNode2.
      -
      -  Since the Object Header link count is greater than one, Object
      -  header data will not be cached in Entry1/Shadow1.
      -
      -  If the initial state had been all of Figure 3 and a third link is
      -  being added and Object is open by Entry1 and Entry2, then creation
      -  of the third link will invalidate the cache in Entry1 or Entry2.  It
      -  doesn't matter which since both caches are already invalidated
      -  anyway.
      -
      -What happens if another Dataset opens the same object by another name?
      -
      -  If the current state is represented by Figure 3, then a Shadow2 is
      -  created and associated with Entry2.  However, since the Object
      -  Header link count is more than one, nothing gets cached in Shadow2
      -  (or Shadow1).
      -
      -What happens if the link count decreases?
      -
      -  If the current state is represented by all of Figure 3 then it isn't
      -  possible to delete Entry1 because the object is currently open
      -  through that entry.  Therefore, the link count must have
      -  decreased because Entry2 was removed.
      -
      -  As Dataset1 reads/writes messages in the Object header they will
      -  begin to be cached in Shadow1 again because the Object header link
      -  count is one.
      -
      -What happens if the object is removed while it's open?
      -
      -  That operation is not allowed.
      -
      -What happens if the directory containing the object is deleted?
      -
      -  That operation is not allowed since deleting the directory requires
      -  that the directory be empty.  The directory cannot be emptied
      -  because the open object cannot be removed from the directory.
      -
      -What happens if the object is moved?
      -
      -  Moving an object is a process consisting of creating a new
      -  hard-link with the new name and then deleting the old name.
      -  This will fail if the object is open.
      -
      -What happens if the directory containing the entry is moved?
      -
      -  The entry and the shadow still exist and are associated with one
      -  another.
      -
      -What if a file is flushed or closed when objects are open?
      -
      -  Flushing a symbol table with open objects writes correct information
      -  to the file since Shadow is copied to Entry before the table is
      -  flushed.
      -
      -  Closing a file with open objects will create a valid file but will
      -  return failure.
      -
      -How is the Shadow associated with the Entry?
      -
      -  A symbol table is composed of one or more symbol nodes.  A node is a
      -  small 1-d array of symbol table entries.  The entries can move
      -  around within a node and from node-to-node as entries are added or
      -  removed from the symbol table and nodes can move around within a
      -  symbol table, being created and destroyed as necessary.
      -
      -  Since a symbol table has an object header with a unique and constant
      -  file offset, and since H5G contains code to efficiently locate a
-  symbol table entry given its name, we use these two values as a key
      -  within a shadow to associate the shadow with the symbol table
      -  entry.
      -
      -	struct H5G_shadow_t {
      -	   haddr_t	stab_addr;    /*symbol table header address*/   
      -	   char         *name;	      /*entry name wrt symbol table*/
      -           hbool_t      dirty;	      /*out-of-date wrt stab entry?*/
      -	   H5G_entry_t  ent;	      /*my copy of stab entry      */
      -	   H5G_entry_t  *main;	      /*the level 1 entry or null  */
      -           H5G_shadow_t *next, *prev; /*other shadows for this stab*/
      -      	};
      -
      -  The set of shadows will be organized in a hash table of linked
      -  lists.  Each linked list will contain the shadows associated with a
      -  particular symbol table header address and the list will be sorted
      -  lexicographically.
      -
      -  Also, each Entry will have a pointer to the corresponding Shadow or
      -  null if there is no shadow.
      -
      -  When a symbol table node is loaded into the main cache, we look up
      -  the linked list of shadows in the shadow hash table based on the
      -  address of the symbol table object header.  We then traverse that
      -  list matching shadows with symbol table entries.
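The lookup just described could be sketched as follows, reusing the H5G_shadow_t layout shown above. The hash function, the table size, and the shadow_find name are hypothetical (and haddr_t is assumed to behave as an unsigned integer here); this illustrates the design, it is not the library's code.

    #include <string.h>

    #define SHADOW_HASH_SIZE 61                /* hypothetical table size */

    static H5G_shadow_t *shadow_hash[SHADOW_HASH_SIZE];

    /* Find the shadow for the entry named `name` in the symbol table whose
     * object header lives at `stab_addr`; NULL means the object is not open. */
    static H5G_shadow_t *shadow_find(haddr_t stab_addr, const char *name)
    {
        H5G_shadow_t *s = shadow_hash[(unsigned)(stab_addr % SHADOW_HASH_SIZE)];

        for (; s != NULL; s = s->next) {
            if (s->stab_addr == stab_addr && strcmp(s->name, name) == 0)
                return s;          /* match: link entry and shadow together */
        }
        return NULL;
    }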
      -
      -  We assume that opening/closing objects will be a relatively
      -  infrequent event compared with loading/flushing symbol table
      -  nodes. Therefore, if we keep the linked list of shadows sorted it
      -  costs O(N) to open and close objects where N is the number of open
      -  objects in that symbol table (instead of O(1)) but it costs only
      -  O(N) to load a symbol table node (instead of O(N^2)).
      -
      -What about the root symbol entry?
      -
      -  Level 1 storage for the root symbol entry is always available since
      -  it's stored in the hdf5_file_t struct instead of a symbol table
      -  node.  However, the contents of that entry can move from the file
      -  handle to a symbol table node by H5G_mkroot().  Therefore, if the
      -  root object is opened, we keep a shadow entry for it whose
      -  `stab_addr' field is zero and whose `name' is null.
      -
      -  For this reason, the root object should always be read through the
      -  H5G interface.
      -
      -One more key invariant:  The H5O_STAB message in a symbol table header
      -never changes.  This allows symbol table entries to cache the H5O_STAB
      -message for the symbol table to which it points without worrying about
      -whether the cache will ever be invalidated.
      -
      -
      -===========================================
      -Last Modified:  8 July 1998 (technical content)
      -Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
      -HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
      -
      -
      - - - diff --git a/doc/html/TechNotes/TestReview.html b/doc/html/TechNotes/TestReview.html deleted file mode 100644 index 410f662..0000000 --- a/doc/html/TechNotes/TestReview.html +++ /dev/null @@ -1,57 +0,0 @@ - - - - State of API Test Review for HDF5 - - - - - - -

      State of API Testing Review for HDF5

      - -
        - -
      1. Purpose:

        -

This document describes the current state of the API test review. Currently, -the tests for each API function are being reviewed on an individual basis; -each API's tests are described and improvements are made as needed. -

        - -
      2. APIs Reviewed:

        - - - - - - - - - - - - - - - - - - - - -
API Function | Date Last Reviewed | Status
        H5Dget_offset -Tuesday, November 11th, 2002 -Tests need to be updated -
        H5Tget_native_type -Tuesday, November 11th, 2002 -Tests need to be updated -
        - - -
      - - - diff --git a/doc/html/TechNotes/TestReview/H5Dget_offset.html b/doc/html/TechNotes/TestReview/H5Dget_offset.html deleted file mode 100644 index 0056f00..0000000 --- a/doc/html/TechNotes/TestReview/H5Dget_offset.html +++ /dev/null @@ -1,199 +0,0 @@ - - - - H5Dget_offset Test Review - - - - - - -

      H5Dget_offset Test Review

      - -
        - -
      1. Purpose:

        -

        This document describes the API test review results for H5Dget_offset(). -

        - -
      2. Serial Review:

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        Test case -Test source file -Test method -Expected test results -Notes -
        Chunked dataset -dsets.c - -
          -
        1. Create chunked dataset -
        2. Query dataset offset -
        -
        FAIL - -

Because the dataset is stored in chunks that are indexed by a B-tree, there is -no single piece of data to query the offset of. -

        -

        It may be possible in the future to -enhance this function by querying the offset of a particular chunk (or chunks), -but that has limited use because chunks could be compressed, etc. with an I/O -filter. -

        -
        Compact dataset -dsets.c - -
          -
1. Create compact dataset -
        2. Query dataset offset -
        -
        FAIL - -

Because the data is stored in the dataset's object header, there is -no separate piece of data to query the offset of. -

        -

It may be possible in the future to get the offset of the data in the object -header, but this is problematic because the messages in the object -header can get relocated in the file when changes (like adding attributes, etc.) -are made to the dataset, invalidating the address given to the user. -

        -
        Contiguous dataset, [user block size] == 0, not external -dsets.c - -
          -
        1. Create file with 0 sized user-block (the default) -
2. Create contiguous dataset -
        3. Query dataset offset -
        -
        -

        Succeed in getting the proper address and be able to verify -that the data at that address in the file is what was written out. -

        -

        When data storage allocation is "late" (the default), querying the offset -should fail if performed before data is written to the dataset. -

        -
Needs an additional test to verify that the data written out is located at the -correct offset in the file (see the sketch following this table). -
        Contiguous dataset, [user block size] != 0, not external -dsets.c - -
          -
        1. Create file with non-0 sized user-block -
2. Create contiguous dataset -
        3. Query dataset offset -
        -
        -

        Succeed in getting the proper address and be able to verify -that the data at that address in the file is what was written out. -

        -

        When data storage allocation is "late" (the default), querying the offset -should fail if performed before data is written to the dataset. -

        -
        Needs test for this case. -
        Contiguous dataset, [user block size] == 0, external data storage -external.c - -
          -
1. Create contiguous dataset with external storage -
        2. Query dataset offset -
        -
        FAIL - -

        In theory, it's easy to return the offset of the data in the external file, -but this wasn't done because it would be too easy for users to assume that the -offset returned was in the HDF5 file instead of the external file. -

        -
        -
        - -
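The contiguous, zero-user-block rows above call for verifying that the raw bytes at the returned offset match what was written. A minimal sketch of such a check, written against the modern public API (H5Dcreate2 and friends) rather than the 1.6-era one, might look like the following; the file and dataset names are placeholders, early allocation is requested so the offset is defined immediately, and error checking is kept to the call under review.

    #include <stdio.h>
    #include "hdf5.h"

    #define FNAME "offset_check.h5"     /* placeholder file name */

    int main(void)
    {
        int     wbuf[10], rbuf[10];
        hsize_t dims = 10;
        haddr_t offset;
        FILE   *fp;

        for (int i = 0; i < 10; i++)
            wbuf[i] = i;

        /* File with the default (zero-sized) user block. */
        hid_t file  = H5Fcreate(FNAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        hid_t space = H5Screate_simple(1, &dims, NULL);

        /* Contiguous layout with early allocation, so the offset is valid
         * even before data is written. */
        hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_layout(dcpl, H5D_CONTIGUOUS);
        H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);

        hid_t dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                                H5P_DEFAULT, dcpl, H5P_DEFAULT);
        H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);

        offset = H5Dget_offset(dset);
        if (offset == HADDR_UNDEF) {
            fprintf(stderr, "H5Dget_offset failed\n");
            return 1;
        }

        H5Dclose(dset);
        H5Pclose(dcpl);
        H5Sclose(space);
        H5Fclose(file);

        /* Re-read the raw bytes at that offset with stdio; the native
         * datatype means no byte-order conversion is involved. */
        fp = fopen(FNAME, "rb");
        if (fp == NULL)
            return 1;
        fseek(fp, (long)offset, SEEK_SET);
        if (fread(rbuf, sizeof(int), 10, fp) != 10) {
            fclose(fp);
            return 1;
        }
        fclose(fp);

        for (int i = 0; i < 10; i++)
            if (rbuf[i] != wbuf[i]) {
                fprintf(stderr, "mismatch at element %d\n", i);
                return 1;
            }
        printf("raw data verified at file offset %llu\n",
               (unsigned long long)offset);
        return 0;
    }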
      3. Parallel Review:

        -

        The H5Dget_offset() function is not tested in parallel. Currently, there -does not appear to be a need for this. -

        - - -
      - - - - diff --git a/doc/html/TechNotes/TestReview/H5Tget_native_type.html b/doc/html/TechNotes/TestReview/H5Tget_native_type.html deleted file mode 100644 index 1c6409d..0000000 --- a/doc/html/TechNotes/TestReview/H5Tget_native_type.html +++ /dev/null @@ -1,522 +0,0 @@ - - - - H5Dget_native_type Test Review - - - - - - -

H5Tget_native_type Test Review

      - -
        - -
      1. Purpose:

        -

This document describes the API test review results for H5Tget_native_type(). -

        - -
      2. Serial Review:

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        Test case -Test source file -Test method -Expected test results -Notes -
        Native int datatype -native.c - -
          -
        1. Create dataset with I32BE datatype -
        2. Query dataset's datatype -
        3. Create native datatype from dataset datatype -
        4. Compare order, class & size of native datatype to known results -
        -
        Check that type's size, order and class are correct. - -

        Data is written & read back in for this test. -

        -

It would be convenient to have a function in the test module for choosing -the correct atomic datatype based on the particular platform settings. This -should use the H5_SIZEOF_ macros. (A sketch of the basic check for this row -follows the table.) -

        -
        Native long long datatype -native.c - -
          -
        1. Create dataset with I64LE datatype -
        2. Query dataset's datatype -
        3. Create native datatype from dataset datatype -
        4. Compare order, class & size of native datatype to known results -
        -
        Check that type's size, order and class are correct. - -

        Data is NOT written & read back in for this test. -

        -
        Native char datatype -native.c - -
          -
        1. Create dataset with I8LE datatype -
        2. Query dataset's datatype -
        3. Create native datatype from dataset datatype -
        4. Compare order, class & size of native datatype to known results -
        -
        Check that type's size, order and class are correct. - -

        Data is NOT written & read back in for this test. -

        -
        Native float datatype -native.c - -
          -
        1. Create dataset with F32BE datatype -
        2. Query dataset's datatype -
        3. Create native datatype from dataset datatype -
        4. Compare order, class & size of native datatype to known results -
        -
        Check that type's size, order and class are correct. - -

        Data is NOT written & read back in for this test. -

        -

        Need test for native double datatype (stored as 32-bit floating-point -datatype in file). This will probably require using an "epsilon" if the data -is compared for this test. -

        -
        Compound datatype with atomic fields -native.c - -
          -
        1. Create datatype describing native (unpacked) struct in memory -
        2. Create datatype describing packed struct for disk -
        3. Create dataset with "packed" compound datatype -
        4. Query dataset's datatype -
        5. Get native datatype from dataset's datatype -
        6. Use H5Tequal to verify that the native datatype is the same as the native, -unpacked datatype. -
        -
        Check that native and unpacked datatypes are equal. - -

        Data is written & read back in for this test. -

        -
        Compound datatype with one compound field -native.c - -
          -
        1. Create datatype describing nested native (unpacked) structs in memory -
        2. Create datatype describing nested packed structs for disk -
        3. Create dataset with "packed" compound datatype -
        4. Query dataset's datatype -
        5. Get native datatype from dataset's datatype -
        6. Use H5Tequal to verify that the native datatype is the same as the native, -unpacked datatype. -
        -
        Check that native and unpacked datatypes are equal. - -

        Data is written & read back in for this test. -

        -

        Could use test for compound datatype with multiple compound fields. -

        -

Could use a test for compound datatypes nested 3 or more levels deep. -

        -
        Enum datatype -native.c - -
          -
        1. Create enum datatype -
        2. Create dataset with enum datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -enum datatype. -
        -
        Check that native and original datatypes are equal. - -

        Data is written & read back in for this test. -

        -
        Array datatype -native.c - -
          -
        1. Create array datatype -
        2. Create dataset with array datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -array datatype. -
        -
        Check that native and original datatypes are equal. - -

        This is not tested currently. -

        -
        Array of compound datatype -native.c - -
          -
        1. Create array of compound datatype -
        2. Create dataset with array of compound datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -array of compound datatype. -
        -
        Check that native and original datatypes are equal. - -

        Data is written & read back in for this test. -

        -
        Compound datatype with array field -native.c - -
          -
        1. Create compound datatype with array field -
        2. Create dataset with compound datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -compound datatype. -
        -
        Check that native and original datatypes are equal. - -

        This is not tested currently. -

        -
        VL datatype with atomic base type -native.c - -
          -
        1. Create VL datatype -
        2. Create dataset with VL datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -VL datatype. -
        -
        Check that native and original datatypes are equal. - -

        Data is written & read back in for this test. -

        -

        Combinations with VL datatypes in other composite types and with other -datatypes for the base type of the VL datatype are not tested. -

        -
        VL string datatype -native.c - -
          -
        1. Create VL string datatype -
        2. Create dataset with VL string datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -VL string datatype. -
        -
        Check that native and original datatypes are equal. - -

        Data is written & read back in for this test. -

        -

        Combinations with VL string datatypes in composite types -are not tested. -

        -
        Reference datatype -native.c - -
          -
        1. Create reference datatype -
        2. Create dataset with reference datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -reference datatype. -
        -
        Check that native and original datatypes are equal. - -

        Data is written & read back in for this test. -

        -

        Combinations with reference datatypes in composite types -are not tested. -

        -
        Opaque datatype -native.c - -
          -
        1. Create opaque datatype -
        2. Create dataset with opaque datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -opaque datatype. -
        -
        Check that native and original datatypes are equal. - -

        Data is written & read back in for this test. -

        -

        Combinations with opaque datatypes in composite types -are not tested. -

        -
        Bitfield datatype -native.c - -
          -
        1. Create bitfield datatype -
        2. Create dataset with bitfield datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -bitfield datatype. -
        -
        Check that native and original datatypes are equal. - -

        Data is written & read back in for this test. -

        -

        Combinations with bitfield datatypes in composite types -are not tested. -

        -
        Time datatype -native.c - -
          -
        1. Create time datatype -
        2. Create dataset with time datatype -
        3. Query dataset's datatype -
        4. Get native datatype from dataset's datatype -
        5. Use H5Tequal to verify that the native datatype is the same as the original -time datatype. -
        -
        Check that native and original datatypes are equal. - -

        This is not tested currently. -

        -
        -
        - -
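For reference, the first row of the table above (a native int from an I32BE file datatype) can be exercised with the public API roughly as follows. The file and dataset names are placeholders, and on unusual platforms the matching native type may not be H5T_NATIVE_INT, which is exactly the portability concern the notes above raise.

    #include <stdio.h>
    #include "hdf5.h"

    #define FNAME "native_type.h5"      /* placeholder file name */

    int main(void)
    {
        hsize_t dims = 4;

        hid_t file  = H5Fcreate(FNAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        hid_t space = H5Screate_simple(1, &dims, NULL);

        /* Store the data as big-endian 32-bit integers in the file. */
        hid_t dset  = H5Dcreate2(file, "ints", H5T_STD_I32BE, space,
                                 H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        /* Query the file datatype, then its native (in-memory) equivalent. */
        hid_t ftype = H5Dget_type(dset);
        hid_t ntype = H5Tget_native_type(ftype, H5T_DIR_ASCEND);

        /* Compare class, size, and order against the expected native type
         * (H5T_NATIVE_INT on typical platforms). */
        printf("class ok: %d\n", H5Tget_class(ntype) == H5T_INTEGER);
        printf("size ok:  %d\n", H5Tget_size(ntype)  == H5Tget_size(H5T_NATIVE_INT));
        printf("order ok: %d\n", H5Tget_order(ntype) == H5Tget_order(H5T_NATIVE_INT));

        H5Tclose(ntype);
        H5Tclose(ftype);
        H5Dclose(dset);
        H5Sclose(space);
        H5Fclose(file);
        return 0;
    }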
      3. Parallel Review:

        -

The H5Tget_native_type() function is not tested in parallel. Currently, -there does not appear to be a need for this. -

        - - -
      - - - - diff --git a/doc/html/TechNotes/ThreadSafeLibrary.html b/doc/html/TechNotes/ThreadSafeLibrary.html deleted file mode 100644 index e7fdf11..0000000 --- a/doc/html/TechNotes/ThreadSafeLibrary.html +++ /dev/null @@ -1,794 +0,0 @@ - - - - Thread Safe Library - - - - -

      HDF5 Thread Safe library

      - -

      - -

      1. Library header files and conditional compilation

      - -

      -The following code is placed at the beginning of H5private.h: -

      - -
      -
      -  #ifdef H5_HAVE_THREADSAFE
      -  #include <pthread.h>
      -  #endif
      -  
      -
      - -

-H5_HAVE_THREADSAFE is defined when the HDF-5 library is -compiled with the --enable-threadsafe configuration option. In general, -code for the non-threadsafe version of the HDF-5 library is placed within -the #else part of the conditional compilation. The exceptions -to this rule are the changes to the FUNC_ENTER (in -H5private.h), HRETURN and HRETURN_ERROR (in -H5Eprivate.h) macros (see section 3.2). -

      - - -

      2. Global variables/structures

      - -

      2.1 Global library initialization variable

      - -

      -In the threadsafe implementation, the global library initialization -variable H5_libinit_g is changed to a global structure -consisting of the variable with its associated lock (locks are explained -in section 4.1): -

      - -
      -
      -    hbool_t  H5_libinit_g = FALSE;
      -  
      -
      - -

      -becomes -

      - -
      -
      -    H5_api_t H5_g;
      -  
      -
      - -

      -where H5_api_t is -

      - -
      -
      -    typedef struct H5_api_struct {
      -      H5_mutex_t init_lock;           /* API entrance mutex */
      -      hbool_t H5_libinit_g;
      -    } H5_api_t;
      -  
      -
      - -

      -All former references to H5_libinit_g in the library are now -made using the macro H5_INIT_GLOBAL. If the threadsafe -library is to be used, the macro is set to H5_g.H5_libinit_g -instead. -

      - -

      2.2 Global serialization variable

      - -

-A new global boolean variable H5_allow_concurrent_g is used -to determine if multiple threads are allowed to make an API call -simultaneously. This is set to FALSE. -

      - -

      -All APIs that are allowed to do so have their own local variable that -shadows the global variable and is set to TRUE. In phase 1, -no such APIs exist. -

      - -

      -It is defined in H5.c as follows: -

      - -
      -
      -    hbool_t H5_allow_concurrent_g = FALSE;
      -  
      -
      - -

      2.3 Global thread initialization variable

      - -

      -The global variable H5_first_init_g of type -pthread_once_t is used to allow only the first thread in the -application process to call an initialization function using -pthread_once. All subsequent calls to -pthread_once by any thread are disregarded. -

      - -

-The call sets up the mutex in the global structure H5_g (see -section 2.1) via an initialization function -H5_first_thread_init. The first thread initialization -function is described in section 4.2. -

      - -

      -H5_first_init_g is defined in H5.c as follows: -

      - -
      -
      -    pthread_once_t H5_first_init_g = PTHREAD_ONCE_INIT;
      -  
      -
      - -

      2.4 Global key for per-thread error stacks

      - -

      -A global pthread-managed key H5_errstk_key_g is used to -allow pthreads to maintain a separate error stack (of type -H5E_t) for each thread. This is defined in H5.c -as: -

      - -
      -
      -    pthread_key_t H5_errstk_key_g;
      -  
      -
      - -

      -Error stack management is described in section 4.3. -

      - -

      2.5 Global structure and key for thread cancellation prevention

      - -

      -We need to preserve the thread cancellation status of each thread -individually by using a key H5_cancel_key_g. The status is -preserved using a structure (of type H5_cancel_t) which -maintains the cancellability state of the thread before it entered the -library and a count (which works very much like the recursive lock -counter) which keeps track of the number of API calls the thread makes -within the library. -

      - -

      -The structure is defined in H5private.h as: -

      - -
      -
      -    /* cancelability structure */
      -    typedef struct H5_cancel_struct {
      -      int previous_state;
      -      unsigned int cancel_count;
      -    } H5_cancel_t;
      -  
      -
      - -

      -Thread cancellation is described in section 4.4. -

      - - -

      3. Changes to Macro expansions

      - -

      3.1 Changes to FUNC_ENTER

      - -

-The FUNC_ENTER macro is now extended to include macro calls -to perform first-thread initialization, disable cancellability, and wrap a lock -operation around the check of the global initialization flag. Note -that cancellability must be disabled before -acquiring the lock on the library. Doing otherwise would allow the -possibility that the thread is cancelled just after it has acquired the -lock on the library; in that scenario, if the cleanup routines are not -properly set, the library would be permanently locked out. -

      - -

      -The additional macro code and new macro definitions can be found in -Appendix E.1 to E.5. The changes are made in H5private.h. -

      - -

      3.2 Changes to HRETURN and HRETURN_ERROR

      - -

      -The HRETURN and HRETURN_ERROR macros are the -counterparts to the FUNC_ENTER macro described in section -3.1. FUNC_LEAVE makes a macro call to HRETURN, -so it is also covered here. -

      - -

      -The basic changes to these two macros involve adding macro calls to call -an unlock operation and re-enable cancellability if necessary. It should -be noted that the cancellability should be re-enabled only after the -thread has released the lock to the library. The consequence of doing -otherwise would be similar to that described in section 3.1. -

      - -

-The additional macro code and new macro definitions can be found in -Appendix E. The changes are made in H5Eprivate.h. -

      - -

      4. Implementation of threadsafe functionality

      - -

      4.1 Recursive Locks

      - -

      -A recursive mutex lock m allows a thread t1 to successfully lock m more -than once without blocking t1. Another thread t2 will block if t2 tries -to lock m while t1 holds the lock to m. If t1 makes k lock calls on m, -then it also needs to make k unlock calls on m before it releases the -lock. -

      - -

-Our implementation of recursive locks is built on top of a pthread mutex -lock (which is not recursive). It makes use of a pthread condition -variable to have unsuccessful threads wait on the mutex. Waiting threads -are awakened by a signal from the final unlock call made by the thread -holding the lock. -

      - -

      -Recursive locks are defined to be the following type -(H5private.h): -

      - -
      -
      -    typedef struct H5_mutex_struct {
      -      pthread_t owner_thread;         /* current lock owner */
      -      pthread_mutex_t atomic_lock;    /* lock for atomicity of new mechanism */
      -      pthread_cond_t cond_var;        /* condition variable */
      -      unsigned int lock_count;
      -    } H5_mutex_t;
      -  
      -
      - -

      -Detailed implementation code can be found in Appendix A. The -implementation changes are made in H5TS.c. -

      - -

      4.2 First thread initialization

      - -

      -Because the mutex lock associated with a recursive lock cannot be -statically initialized, a mechanism is required to initialize the -recursive lock associated with H5_g so that it can be used -for the first time. -

      - -

-The pthreads library allows this through the pthread_once call which, as -described in section 2.3, allows only the first thread accessing the -library in an application to initialize H5_g. -

      - -

-In addition to initializing H5_g, it also initializes the -key (see section 2.4) for use with per-thread error stacks (see section -4.3). -

      - -

      -The first thread initialization mechanism is implemented as the function -call H5_first_thread_init() in H5TS.c. This is -described in appendix B. -

      - -

      4.3 Per-thread error stack management

      - -

-Pthreads allows individual threads to access dynamic and persistent -per-thread data through the use of keys. Each key is associated with -a table that maps threads to data items. Keys can be initialized by -pthread_key_create() in pthreads (see sections 2.4 and 4.2). -Per-thread data items are accessed using a key through the -pthread_getspecific() and pthread_setspecific() -calls to read and write to the association table respectively. -

      - -

      -Per-thread error stacks are accessed through the key -H5_errstk_key_g which is initialized by the first thread -initialization call (see section 4.2). -

      - -

      -In the non-threadsafe version of the library, there is a global stack -variable H5E_stack_g[1] which is no longer defined in the -threadsafe version. At the same time, the macro call to gain access to -the error stack H5E_get_my_stack is changed from: -

      - -
      -
      -    #define H5E_get_my_stack() (H5E_stack_g+0)
      -  
      -
      - -

      -to: -

      - -
      -
      -    #define H5E_get_my_stack() H5E_get_stack()
      -  
      -
      - -

      -where H5E_get_stack() is a surrogate function that does the -following operations: -

      - -
        -
      1. if a thread is attempting to get an error stack for the first - time, the error stack is dynamically allocated for the thread and - associated with H5_errstk_key_g using - pthread_setspecific(). The way we detect if it is the - first time is through pthread_getspecific() which - returns NULL if no previous value is associated with - the thread using the key.
      2. - -
      3. if pthread_getspecific() returns a non-null value, - then that is the pointer to the error stack associated with the - thread and the stack can be used as usual.
      4. -
      - -

-A final change is made to the error reporting routines: the current -implementation always reports errors as detected at thread 0. In the -threadsafe implementation, this is changed to report the number returned -by a call to pthread_self(). -

      - -

      -The change in code (reflected in H5Eprint of file -H5E.c) is as follows: -

      - -
      -
      -    #ifdef H5_HAVE_THREADSAFE
      -      fprintf (stream, "HDF5-DIAG: Error detected in thread %d."
      -               ,pthread_self());
      -    #else
      -      fprintf (stream, "HDF5-DIAG: Error detected in thread 0.");
      -    #endif
      -  
      -
      - -

      -Code for H5E_get_stack() can be found in Appendix C. All the -above changes were made in H5E.c. -

      - -

      4.4 Thread Cancellation safety

      - -

      -To prevent thread cancellations from killing a thread while it is in the -library, we maintain per-thread information about the cancellability -status of the thread before it entered the library so that we can restore -that same status when the thread leaves the library. -

      - -

-By entering and leaving the library, we mean the point when a -thread makes an API call from a user application and the point when that API -call returns. Other API or callback function calls made from within that -API call are considered within the library. -

      - -

      -Because other API calls may be made from within the first API call, we -need to maintain a counter to determine which was the first and -correspondingly the last return. -

      - -

      -When a thread makes an API call, the macro H5_API_SET_CANCEL -calls the worker function H5_cancel_count_inc() which does -the following: -

      - -
        -
      1. if this is the first time the thread has entered the library, - a new cancellability structure needs to be assigned to it.
      2. -
      3. if the thread is already within the library when the API call is - made, then cancel_count is simply incremented. Otherwise, we set - the cancellability state to PTHREAD_CANCEL_DISABLE - while storing the previous state into the cancellability structure. - cancel_count is also incremented in this case.
      4. -
      - -

      -When a thread leaves an API call, the macro -H5_API_UNSET_CANCEL calls the worker function -H5_cancel_count_dec() which does the following: -

      - -
        -
      1. if cancel_count is greater than 1, indicating that the - thread is not yet about to leave the library, then - cancel_count is simply decremented.
      2. -
      3. otherwise, we reset the cancellability state back to its original - state before it entered the library and decrement the count (back - to zero).
      4. -
      - -

      -H5_cancel_count_inc and H5_cancel_count_dec are -described in Appendix D and may be found in H5TS.c. -

      - -

      5. Test programs

      - -

      -Except where stated, all tests involve 16 simultaneous threads that make -use of HDF-5 API calls without any explicit synchronization typically -required in a non-threadsafe environment. -

      - -

      5.1 Data set create and write

      - -

-The test program sets up 16 threads to simultaneously create 16 -different datasets named from zero to fifteen in a single -file and then write into each dataset an integer value equal to the -dataset's named value. -

      - -

-The main thread then joins all 16 threads and checks the -resulting HDF-5 file against the expected results: that each dataset contains -the correct value (0 for zero, 1 for one, etc.) and that all -datasets were correctly created. -

      - -

      -The test is implemented in the file ttsafe_dcreate.c. -

      - -

      5.2 Test on error stack

      - -

      -The error stack test is one in which 16 threads simultaneously try to -create datasets with the same name. The result, when properly serialized, -should be equivalent to 16 attempts to create the dataset with the same -name. -

      - -

-The error stack implementation runs correctly if it reports 15 instances -of the dataset name conflict error and finally generates a correct HDF-5 file -containing that single dataset. Each thread should report its own stack -of errors with a thread number associated with it. -

      - -

      -The test is implemented in the file ttsafe_error.c. -

      - -

      5.3 Test on cancellation safety

      - -

      -The main idea in thread cancellation safety is as follows; a child thread -is spawned to create and write to a dataset. Following that, it makes a -H5Diterate call on that dataset which activates a callback -function. -

      - -

      -A deliberate barrier is invoked at the callback function which waits for -both the main and child thread to arrive at that point. After that -happens, the main thread proceeds to make a thread cancel call on the -child thread while the latter sleeps for 3 seconds before proceeding to -write a new value to the dataset. -

      - -

      -After the iterate call, the child thread logically proceeds to wait -another 3 seconds before writing another newer value to the dataset. -

      - -

-The test is correct if the main thread manages to read the second value -at the end of the test. This means that cancellation did not take place -until the end of the iteration call, despite the 3-second wait within -the iteration callback and the extra dataset write operation. -Furthermore, the cancellation should occur before the child can proceed -to write the last value into the dataset. -

      - -

      5.4 Test on attribute creation

      - -

-A main thread makes 16 threaded calls to H5Acreate with a -generated name for each attribute. Sixteen attributes should be created -for the single dataset in random (chronological) order and receive values -depending on their generated attribute names (e.g., attrib010 would -receive the value 10). -

      - -

      -After joining with all child threads, the main thread proceeds to read -each attribute by generated name to see if the value tallies. Failure is -detected if the attribute name does not exist (meaning they were never -created) or if the wrong values were read back. -

      - -

      A. Recursive Lock implementation code

      - -
      -
      -  void H5_mutex_init(H5_mutex_t *H5_mutex)
      -  {
      -    H5_mutex->owner_thread = NULL;
      -    pthread_mutex_init(&H5_mutex->atomic_lock, NULL);
      -    pthread_cond_init(&H5_mutex->cond_var, NULL);
      -    H5_mutex->lock_count = 0;
      -  }
      -
      -  void H5_mutex_lock(H5_mutex_t *H5_mutex)
      -  {
      -    pthread_mutex_lock(&H5_mutex->atomic_lock);
      -
      -    if (pthread_equal(pthread_self(), H5_mutex->owner_thread)) {
      -    	/* already owned by self - increment count */
      -    	H5_mutex->lock_count++;
      -    } else {
      -    	if (H5_mutex->owner_thread == NULL) {
      -    		/* no one else has locked it - set owner and grab lock */
      -    		H5_mutex->owner_thread = pthread_self();
      -    		H5_mutex->lock_count = 1;
      -    	} else {
      -    		/* if already locked by someone else */
      -    		while (1) {
      -    			pthread_cond_wait(&H5_mutex->cond_var, &H5_mutex->atomic_lock);
      -
      -    			if (H5_mutex->owner_thread == NULL) {
      -    				H5_mutex->owner_thread = pthread_self();
      -    				H5_mutex->lock_count = 1;
      -    				break;
      -    			} /* else do nothing and loop back to wait on condition*/
      -    		}
      -    	}
      -    }
      -
      -    pthread_mutex_unlock(&H5_mutex->atomic_lock);
      -  }
      -
      -  void H5_mutex_unlock(H5_mutex_t *H5_mutex)
      -  {
      -    pthread_mutex_lock(&H5_mutex->atomic_lock);
      -    H5_mutex->lock_count--;
      -
      -    if (H5_mutex->lock_count == 0) {
      -    	H5_mutex->owner_thread = NULL;
      -    	pthread_cond_signal(&H5_mutex->cond_var);
      -    }
      -    pthread_mutex_unlock(&H5_mutex->atomic_lock);
      -  }
      -  
      -
      - -

      B. First thread initialization

      - -
      -
      -  void H5_first_thread_init(void)
      -  {
      -    /* initialize global API mutex lock                      */
      -    H5_g.H5_libinit_g = FALSE;
      -    H5_g.init_lock.owner_thread = NULL;
      -    pthread_mutex_init(&H5_g.init_lock.atomic_lock, NULL);
      -    pthread_cond_init(&H5_g.init_lock.cond_var, NULL);
      -    H5_g.init_lock.lock_count = 0;
      -
      -    /* initialize key for thread-specific error stacks       */
      -    pthread_key_create(&H5_errstk_key_g, NULL);
      -
      -    /* initialize key for thread cancellability mechanism    */
      -    pthread_key_create(&H5_cancel_key_g, NULL);
      -  }
      -  
      -
      - - -

      C. Per-thread error stack acquisition

      - -
      -
      -  H5E_t *H5E_get_stack(void)
      -  {
      -    H5E_t *estack;
      -
-    if ((estack = pthread_getspecific(H5_errstk_key_g)) != NULL) {
      -    	return estack;
      -    } else {
      -    	/* no associated value with current thread - create one */
      -    	estack = (H5E_t *)malloc(sizeof(H5E_t));
      -    	pthread_setspecific(H5_errstk_key_g, (void *)estack);
      -    	return estack;
      -    }
      -  }
      -  
      -
      - -

      D. Thread cancellation mechanisms

      - -
      -
      -  void H5_cancel_count_inc(void)
      -  {
      -    H5_cancel_t *cancel_counter;
      -
-    if ((cancel_counter = pthread_getspecific(H5_cancel_key_g)) != NULL) {
      -      /* do nothing here */
      -    } else {
      -      /*
      -       * first time thread calls library - create new counter and
      -       * associate with key
      -       */
      -      cancel_counter = (H5_cancel_t *)malloc(sizeof(H5_cancel_t));
      -      cancel_counter->cancel_count = 0;
      -      pthread_setspecific(H5_cancel_key_g, (void *)cancel_counter);
      -    }
      -
      -    if (cancel_counter->cancel_count == 0) {
      -      /* thread entering library */
      -      pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
      -                             &(cancel_counter->previous_state));
      -    }
      -
      -    cancel_counter->cancel_count++;
      -  }
      -
      -  void H5_cancel_count_dec(void)
      -  {
      -    H5_cancel_t *cancel_counter = pthread_getspecific(H5_cancel_key_g);
      -
      -    if (cancel_counter->cancel_count == 1)
      -      pthread_setcancelstate(cancel_counter->previous_state, NULL);
      -
      -    cancel_counter->cancel_count--;
      -  }
      -  
      -
      - -

      E. Macro expansion codes

      - -

      E.1 FUNC_ENTER

      - -
      -
      -  /* Initialize the library */                                \
      -  H5_FIRST_THREAD_INIT                                        \
      -  H5_API_UNSET_CANCEL                                         \
      -  H5_API_LOCK_BEGIN                                           \
      -    if (!(H5_INIT_GLOBAL)) {                                  \
      -      H5_INIT_GLOBAL = TRUE;                                  \
      -        if (H5_init_library() < 0) {                          \
      -          HRETURN_ERROR (H5E_FUNC, H5E_CANTINIT, err,         \
      -                        "library initialization failed");     \
      -        }                                                     \
      -    }                                                         \
      -    H5_API_LOCK_END                                           \
      -             :
      -             :
      -             :
      -  
      -
      - -

      E.2 H5_FIRST_THREAD_INIT

      - -
      -
      -  /* Macro for first thread initialization */
      -  #define H5_FIRST_THREAD_INIT                                \
      -    pthread_once(&H5_first_init_g, H5_first_thread_init);
      -  
      -
      - - -

      E.3 H5_API_UNSET_CANCEL

      - -
      -
      -  #define H5_API_UNSET_CANCEL                                 \
      -    if (H5_IS_API(FUNC)) {                                    \
      -      H5_cancel_count_inc();                                  \
      -    }
      -  
      -
      - - -

      E.4 H5_API_LOCK_BEGIN

      - -
      -
      -  #define H5_API_LOCK_BEGIN                                   \
      -     if (H5_IS_API(FUNC)) {                                   \
      -       H5_mutex_lock(&H5_g.init_lock);
      -  
      -
      - - -

      E.5 H5_API_LOCK_END

      - -
      -
      -  #define H5_API_LOCK_END }
      -  
      -
      - - -

      E.6 HRETURN and HRETURN_ERROR

      - -
      -
      -            :
      -            :
      -    H5_API_UNLOCK_BEGIN                                       \
      -    H5_API_UNLOCK_END                                         \
      -    H5_API_SET_CANCEL                                         \
      -    return ret_val;                                           \
      -  }
      -  
      -
      - -

      E.7 H5_API_UNLOCK_BEGIN

      - -
      -
      -  #define H5_API_UNLOCK_BEGIN                                 \
      -    if (H5_IS_API(FUNC)) {                                    \
      -      H5_mutex_unlock(&H5_g.init_lock);
      -  
      -
      - -

      E.8 H5_API_UNLOCK_END

      - -
      -
      -  #define H5_API_UNLOCK_END }
      -  
      -
      - - -

      E.9 H5_API_SET_CANCEL

      - -
      -
      -  #define H5_API_SET_CANCEL                                   \
      -    if (H5_IS_API(FUNC)) {                                    \
      -      H5_cancel_count_dec();                                  \
      -    }
      -  
      -
      - -

      By Chee Wai Lee

      -

      By Bill Wendling

      -

      27. October 2000

      - - - diff --git a/doc/html/TechNotes/VFL.html b/doc/html/TechNotes/VFL.html deleted file mode 100644 index 5674cdb..0000000 --- a/doc/html/TechNotes/VFL.html +++ /dev/null @@ -1,1543 +0,0 @@ - - - - -HDF5 Virtual File Layer - - -

      HDF5

      -

      Virtual File Layer

      -

      Proposal 1999-08-11

      -
      Robb Matzke
      -

      -


      -

      Table of Contents

      - -


      - - -

      Introduction

      - -

      -The HDF5 file format describes how HDF5 data structures and dataset raw -data are mapped to a linear format address space and the HDF5 -library implements that bidirectional mapping in terms of an -API. However, the HDF5 format specifications do not indicate how -the format address space is mapped onto storage and HDF (version 5 and -earlier) simply mapped the format address space directly onto a single -file by convention. - -

      -

-Since the early versions of HDF5 it has been apparent that users want the ability to map the format address space onto different types of storage (a single file, multiple files, local memory, global memory, network distributed global memory, a network protocol, etc.) with various types of maps. For instance, some users want to be able to handle very large format address spaces on operating systems that support only 2GB files by partitioning the format address space into equal-sized parts, each served by a separate file. Other users want the same multi-file storage capability but want to partition the address space according to purpose (raw data in one file, object headers in another, global heap in a third, etc.) in order to improve I/O speeds. - -

      -

-In fact, the number of storage variations is probably larger than the number of methods that the HDF5 team is capable of implementing and supporting. Therefore, a Virtual File Layer API is being implemented which will allow application teams or departments to design and implement their own mapping between the HDF5 format address space and storage, with each mapping being a separate file driver (possibly written in terms of other file drivers). The HDF5 team will provide a small set of useful file drivers which will also serve as examples for those who wish to write their own: - -

      -
      - -
      H5FD_SEC2 -
      -This is the default driver which uses Posix file-system functions like -read and write to perform I/O to a single file. All I/O -requests are unbuffered although the driver does optimize file seeking -operations to some extent. - -
      H5FD_STDIO -
      -This driver uses functions from `stdio.h' to perform buffered I/O -to a single file. - -
      H5FD_CORE -
      -This driver performs I/O directly to memory and can be used to create small -temporary files that never exist on permanent storage. This type of storage is -generally very fast since the I/O consists only of memory-to-memory copy -operations. - -
      H5FD_MPIIO -
      -This is the driver of choice for accessing files in parallel using MPI and -MPI-IO. It is only predefined if the library is compiled with parallel I/O -support. - -
      H5FD_FAMILY -
      -Large format address spaces are partitioned into more manageable pieces and -sent to separate storage locations using an underlying driver of the user's -choice. The h5repart tool can be used to change the sizes of the -family members when stored as files or to convert a family of files to a -single file or vice versa. - -
      H5FD_SPLIT -
      -The format address space is split into meta data and raw data and each is -mapped onto separate storage using underlying drivers of the user's -choice. The meta data storage can be read by itself (for limited -functionality) or both files can be accessed together. -
      - - - -

      Using a File Driver

      - -

      -Most application writers will use a driver defined by the HDF5 library or -contributed by another programming team. This chapter describes how existing -drivers are used. - -

      - - - -

      Driver Header Files

      - -

      -Each file driver is defined in its own public header file which should -be included by any application which plans to use that driver. The -predefined drivers are in header files whose names begin with -`H5FD' followed by the driver name and `.h'. The `hdf5.h' -header file includes all the predefined driver header files. - -

      -

      -Once the appropriate header file is included a symbol of the form -`H5FD_' followed by the upper-case driver name will be the driver -identification number.(1) However, the -value may change if the library is closed (e.g., by calling -H5close) and the symbol is referenced again. - -

      - - -

      Creating and Opening Files

      - -

-In order to create or open a file one must define the method by which the storage is accessed(2); this is done by creating a file access property list(3) which is passed to the H5Fcreate or H5Fopen function. A default file access property list is created by calling H5Pcreate and then the file driver information is inserted by calling a driver initialization function such as H5Pset_fapl_family: - -

      - -
      -hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
      -size_t member_size = 100*1024*1024; /*100MB*/
      -H5Pset_fapl_family(fapl, member_size, H5P_DEFAULT);
      -hid_t file = H5Fcreate("foo%05d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
      -H5Pclose(fapl);
      -
      - -

      -Each file driver will have its own initialization function -whose name is H5Pset_fapl_ followed by the driver name and which -takes a file access property list as the first argument followed by -additional driver-dependent arguments. - -
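-
-For example, the split driver takes a different set of driver-dependent
-arguments than the family driver shown above. A minimal sketch (assuming the
-H5Pset_fapl_split initialization function with metadata and raw-data name
-extensions; the file names are illustrative):
-
-hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
-/* metadata goes to "foo.meta", raw data to "foo.raw", both with default access */
-H5Pset_fapl_split(fapl, ".meta", H5P_DEFAULT, ".raw", H5P_DEFAULT);
-hid_t file = H5Fcreate("foo", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
-H5Pclose(fapl);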

      -

      -An alternative to using the driver initialization function is to set the -driver directly using the H5Pset_driver function.(4) Its second argument is the file driver identifier, which may -have a different numeric value from run to run depending on the order in which -the file drivers are registered with the library. The third argument -encapsulates the additional arguments of the driver initialization -function. This method only works if the file driver writer has made the -driver-specific property list structure a public datatype, which is -often not the case. - -

      - -
      -hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
      -static H5FD_family_fapl_t fa = {100*1024*1024, H5P_DEFAULT};
      -H5Pset_driver(fapl, H5FD_FAMILY, &fa);
      -hid_t file = H5Fcreate("foo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
      -H5Pclose(fapl);
      -
      - -

      -It is also possible to query the file driver information from a file access -property list by calling H5Pget_driver to determine the driver and then -calling a driver-defined query function to obtain the driver information: - -

      - -
      -hid_t driver = H5Pget_driver(fapl);
      -if (H5FD_SEC2==driver) {
      -    /*nothing further to get*/
      -} else if (H5FD_FAMILY==driver) {
      -    hid_t member_fapl;
      -    haddr_t member_size;
      -    H5Pget_fapl_family(fapl, &member_size, &member_fapl);
      -} else if (....) {
      -    ....
      -}
      -
      - - - -

      Performing I/O

      - -

      -The H5Dread and H5Dwrite functions transfer data between -application memory and the file. They both take an optional data transfer -property list which has some general driver-independent properties and -optional driver-defined properties. An application will typically perform I/O -in one of three styles via the H5Dread or H5Dwrite function: - -

      -

      -Like file access properties in the previous section, data transfer properties -can be set using a driver initialization function or a general purpose -function. For example, to set the MPI-IO driver to use independent access for -I/O operations one would say: - -

      - -
      -hid_t dxpl = H5Pcreate(H5P_DATA_XFER);
      -H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
      -H5Dread(dataset, type, mspace, fspace, buffer, dxpl);
      -H5Pclose(dxpl);
      -
      - -

-The alternative is to initialize a driver-defined C struct and pass it to the H5Pset_driver function: - -

      - -
      -hid_t dxpl = H5Pcreate(H5P_DATA_XFER);
      -static H5FD_mpio_dxpl_t dx = {H5FD_MPIO_INDEPENDENT};
      -H5Pset_driver(dxpl, H5FD_MPIO, &dx);
      -H5Dread(dataset, type, mspace, fspace, buffer, dxpl);
      -
      - -

-The transfer property list can be queried in a manner similar to the file access property list: the driver provides a function (or functions) to return various information about the transfer property list: - -

      - -
      -hid_t driver = H5Pget_driver(dxpl);
      -if (H5FD_MPIO==driver) {
      -    H5FD_mpio_xfer_t xfer_mode;
      -    H5Pget_dxpl_mpio(dxpl, &xfer_mode);
      -} else {
      -    ....
      -}
      -
      - - - -

      File Driver Interchangeability

      - -

      -The HDF5 specifications describe two things: the mapping of data onto a linear -format address space and the C API which performs the mapping. -However, the mapping of the format address space onto storage intentionally -falls outside the scope of the HDF5 specs. This is a direct result of the fact -that it is not generally possible to store information about how to access -storage inside the storage itself. For instance, given only the file name -`/arborea/1225/work/f%03d' the HDF5 library is unable to tell whether the -name refers to a file on the local file system, a family of files on the local -file system, a file on host `arborea' port 1225, a family of files on a -remote system, etc. - -

      -

-There are two ways in which the library could figure out where the storage is located: the storage access information can be provided by the user, or the library can try all known file access methods. This implementation uses the former method. - -

      -

      -In general, if a file was created with one driver then it isn't possible to -open it with another driver. There are of course exceptions: a file created -with MPIO could probably be opened with the sec2 driver, any file created -by the sec2 driver could be opened as a family of files with one member, -etc. In fact, sometimes a file must not only be opened with the same -driver but also with the same driver properties. The predefined drivers are -written in such a way that specifying the correct driver is sufficient for -opening a file. - -

      - - -

      Implementation of a Driver

      - -

      -A driver is simply a collection of functions and data structures which are -registered with the HDF5 library at runtime. The functions fall into these -categories: - -

      - -
        -
      • Functions which operate on modes - -
      • Functions which operate on files - -
      • Functions which operate on the address space - -
      • Functions which operate on data - -
      • Functions for driver initialization - -
      • Optimization functions - -
      - - - -

      Mode Functions

      - -

      -Some drivers need information about file access and data transfers which are -very specific to the driver. The information is usually implemented as a pair -of pointers to C structs which are allocated and initialized as part of an -HDF5 property list and passed down to various driver functions. There are two -classes of settings: file access modes that describe how to access the file -through the driver, and data transfer modes which are settings that control -I/O operations. Each file opened by a particular driver may have a different -access mode; each dataset I/O request for a particular file may have a -different data transfer mode. - -

      -

      -Since each driver has its own particular requirements for various settings, -each driver is responsible for defining the mode structures that it -needs. Higher layers of the library treat the structures as opaque but must be -able to copy and free them. Thus, the driver provides either the size of the -structure or a pair of function pointers for each of the mode types. - -

      -

      -Example: The family driver needs to know how the format address -space is partitioned and the file access property list to use for the -family members. - -

      - -
      -/* Driver-specific file access properties */
      -typedef struct H5FD_family_fapl_t {
      -    hsize_t     memb_size;      /*size of each member                   */
      -    hid_t       memb_fapl_id;   /*file access property list of each memb*/
      -} H5FD_family_fapl_t;
      -
      -/* Driver specific data transfer properties */
      -typedef struct H5FD_family_dxpl_t {
      -    hid_t       memb_dxpl_id;   /*data xfer property list of each memb  */
      -} H5FD_family_dxpl_t;
      -
      - -

      -In order to copy or free one of these structures the member file access -or data transfer properties must also be copied or freed. This is done -by providing a copy and close function for each structure: - -

      -

      -Example: The file access property list copy and close functions -for the family driver: - -

      - -
      -static void *
      -H5FD_family_fapl_copy(const void *_old_fa)
      -{
      -    const H5FD_family_fapl_t *old_fa = (const H5FD_family_fapl_t*)_old_fa;
      -    H5FD_family_fapl_t *new_fa = malloc(sizeof(H5FD_family_fapl_t));
      -    assert(new_fa);
      -
      -    memcpy(new_fa, old_fa, sizeof(H5FD_family_fapl_t));
      -    new_fa->memb_fapl_id = H5Pcopy(old_fa->memb_fapl_id);
      -    return new_fa;
      -}
      -
      -static herr_t
      -H5FD_family_fapl_free(void *_fa)
      -{
      -    H5FD_family_fapl_t  *fa = (H5FD_family_fapl_t*)_fa;
      -    H5Pclose(fa->memb_fapl_id);
      -    free(fa);
      -    return 0;
      -}
      -
      - -

      -Generally when a file is created or opened the file access properties -for the driver are copied into the file pointer which is returned and -they may be modified from their original value (for instance, the file -family driver modifies the member size property when opening an existing -family). In order to support the H5Fget_access_plist function the -driver must provide a fapl_get callback which creates a copy of -the driver-specific properties based on a particular file. - -

      -

      -Example: The file family driver copies the member size file -access property list into the return value: - -

      - -
      -static void *
      -H5FD_family_fapl_get(H5FD_t *_file)
      -{
      -    H5FD_family_t	*file = (H5FD_family_t*)_file;
-    H5FD_family_fapl_t  *fa = calloc(1, sizeof(H5FD_family_fapl_t)); /* allocate the struct itself, not a pointer */
      -
      -    fa->memb_size = file->memb_size;
      -    fa->memb_fapl_id = H5Pcopy(file->memb_fapl_id);
      -    return fa;
      -}
      -
      - - - -

      File Functions

      - -

      -The higher layers of the library expect files to have a name and allow the -file to be accessed in various modes. The driver must be able to create a new -file, replace an existing file, or open an existing file. Opening or creating -a file should return a handle, a pointer to a specialization of the -H5FD_t struct, which allows read-only or read-write access and which -will be passed to the other driver functions as they are -called.(5) - -

      - -
      -typedef struct {
      -    /* Public fields */
      -    H5FD_class_t *cls; /*class data defined below*/
      -
      -    /* Private fields -- driver-defined */
      -
      -} H5FD_t;
      -
      - -

      -Example: The family driver requires handles to the underlying -storage, the size of the members for this particular file (which might be -different than the member size specified in the file access property list if -an existing file family is being opened), the name used to open the file in -case additional members must be created, and the flags to use for creating -those additional members. The eoa member caches the size of the format -address space so the family members don't have to be queried in order to find -it. - -

      - -
      -/* The description of a file belonging to this driver. */
      -typedef struct H5FD_family_t {
      -    H5FD_t      pub;            /*public stuff, must be first           */
      -    hid_t       memb_fapl_id;   /*file access property list for members */
      -    hsize_t     memb_size;      /*maximum size of each member file      */
      -    int         nmembs;         /*number of family members              */
      -    int         amembs;         /*number of member slots allocated      */
      -    H5FD_t      **memb;         /*dynamic array of member pointers      */
      -    haddr_t     eoa;            /*end of allocated addresses            */
      -    char        *name;          /*name generator printf format          */
      -    unsigned    flags;          /*flags for opening additional members  */
      -} H5FD_family_t;
      -
      - -

-Example: The sec2 driver needs to keep track of the underlying Unix file descriptor and also the end of the format address space and the current Unix file size. It also keeps track of the current file position and last operation (read, write, or unknown) in order to optimize calls to lseek. The device and inode fields are defined on Unix in order to uniquely identify the file and will be discussed below. - -

      - -
      -typedef struct H5FD_sec2_t {
      -    H5FD_t      pub;                    /*public stuff, must be first   */
      -    int         fd;                     /*the unix file                 */
      -    haddr_t     eoa;                    /*end of allocated region       */
      -    haddr_t     eof;                    /*end of file; current file size*/
      -    haddr_t     pos;                    /*current file I/O position     */
      -    int         op;                     /*last operation                */
      -    dev_t       device;                 /*file device number            */
      -    ino_t       inode;                  /*file i-node number            */
      -} H5FD_sec2_t;
      -
      - - - -

      Opening Files

      - -

      -All drivers must define a function for opening/creating a file. This -function should have a prototype which is: - -

      -

      -

      -
      Function: static H5FD_t * open (const char *name, unsigned flags, hid_t fapl, haddr_t maxaddr) -
      - -

      -

-The file name name and file access property list fapl are the same as were specified in the H5Fcreate or H5Fopen call. The flags are the same as in those calls, except that H5F_ACC_CREAT is also present if the call was to H5Fcreate; the flags are documented in the `H5Fpublic.h' file. The maxaddr argument is the maximum format address that the driver should be prepared to handle (the minimum address is always zero). -

      - -

      -

      -Example: The sec2 driver opens a Unix file with the requested name -and saves information which uniquely identifies the file (the Unix device -number and inode). - -

      - -
      -static H5FD_t *
      -H5FD_sec2_open(const char *name, unsigned flags, hid_t fapl_id/*unused*/,
      -               haddr_t maxaddr)
      -{
      -    unsigned    o_flags;
      -    int         fd;
      -    struct stat sb;
      -    H5FD_sec2_t *file=NULL;
      -    
      -    /* Check arguments */
      -    if (!name || !*name) return NULL;
      -    if (0==maxaddr || HADDR_UNDEF==maxaddr) return NULL;
      -    if (ADDR_OVERFLOW(maxaddr)) return NULL;
      -
      -    /* Build the open flags */
      -    o_flags = (H5F_ACC_RDWR & flags) ? O_RDWR : O_RDONLY;
      -    if (H5F_ACC_TRUNC & flags) o_flags |= O_TRUNC;
      -    if (H5F_ACC_CREAT & flags) o_flags |= O_CREAT;
      -    if (H5F_ACC_EXCL & flags) o_flags |= O_EXCL;
      -
      -    /* Open the file */
      -    if ((fd=open(name, o_flags, 0666))<0) return NULL;
      -    if (fstat(fd, &sb)<0) {
      -        close(fd);
      -        return NULL;
      -    }
      -
      -    /* Create the new file struct */
      -    file = calloc(1, sizeof(H5FD_sec2_t));
      -    file->fd = fd;
      -    file->eof = sb.st_size;
      -    file->pos = HADDR_UNDEF;
      -    file->op = OP_UNKNOWN;
      -    file->device = sb.st_dev;
      -    file->inode = sb.st_ino;
      -
      -    return (H5FD_t*)file;
      -}
      -
      - - - -

      Closing Files

      - -

      -Closing a file simply means that all cached data should be flushed to the next -lower layer, the file should be closed at the next lower layer, and all -file-related data structures should be freed. All information needed by the -close function is already present in the file handle. - -

      -

      -

      -
      Function: static herr_t close (H5FD_t *file) -
      - -

      -

      -The file argument is the handle which was returned by the open -function, and the close should free only memory associated with the -driver-specific part of the handle (the public parts will have already been released by HDF5's virtual file layer). -

      - -

      -

-Example: The sec2 driver just closes the underlying Unix file, making sure that the actual file size is the same as that known to the library by writing a zero to the last allocated file position if it has not already been written by some previous operation (this happens in the same code which flushes the file contents and is shown below). - -

      - -
      -static herr_t
      -H5FD_sec2_close(H5FD_t *_file)
      -{
      -    H5FD_sec2_t *file = (H5FD_sec2_t*)_file;
      -
      -    if (H5FD_sec2_flush(_file)<0) return -1;
      -    if (close(file->fd)<0) return -1;
      -    free(file);
      -    return 0;
      -}
      -
      - - - -

      File Keys

      - -

      -Occasionally an application will attempt to open a single file more than one -time in order to obtain multiple handles to the file. HDF5 allows the files to -share information(6) but in order to -accomplish this HDF5 must be able to tell when two names refer to the same -file. It does this by associating a driver-defined key with each file opened -by a driver and comparing the key for an open request with the keys for all -other files currently open by the same driver. - -

      -

      -

      -
      Function: const int cmp (const H5FD_t *f1, const H5FD_t *f2) -
      - -

      -

      -The driver may provide a function which compares two files f1 and -f2 belonging to the same driver and returns a negative, positive, or -zero value a la the strcmp function.(7) If this -function is not provided then HDF5 assumes that all calls to the open -callback return unique files regardless of the arguments and it is up to the -application to avoid doing this if that assumption is incorrect. -

      - -

      -

      -Each time a file is opened the library calls the cmp function to -compare that file with all other files currently open by the same driver and -if one of them matches (at most one can match) then the file which was just -opened is closed and the previously opened file is used instead. - -

      -

      -Opening a file twice with incompatible flags will result in failure. For -instance, opening a file with the truncate flag is a two step process which -first opens the file without truncation so keys can be compared, and if no -matching file is found already open then the file is closed and immediately -reopened with the truncation flag set (if a matching file is already open then -the truncating open will fail). - -

      -

      -Example: The sec2 driver uses the Unix device and i-node as the -key. They were initialized when the file was opened. - -

      - -
      -static int
      -H5FD_sec2_cmp(const H5FD_t *_f1, const H5FD_t *_f2)
      -{
      -    const H5FD_sec2_t   *f1 = (const H5FD_sec2_t*)_f1;
      -    const H5FD_sec2_t   *f2 = (const H5FD_sec2_t*)_f2;
      -
      -    if (f1->device < f2->device) return -1;
      -    if (f1->device > f2->device) return 1;
      -
      -    if (f1->inode < f2->inode) return -1;
      -    if (f1->inode > f2->inode) return 1;
      -
      -    return 0;
      -}
      -
      - - - -

      Saving Modes Across Opens

      - -

      -Some drivers may also need to store certain information in the file superblock -in order to be able to reliably open the file at a later date. This is done by -three functions: one to determine how much space will be necessary to store -the information in the superblock, one to encode the information, and one to -decode the information. These functions are optional, but if any one is -defined then the other two must also be defined. - -

      -

      -

      -
      Function: static hsize_t sb_size (H5FD_t *file) -
      -
      Function: static herr_t sb_encode (H5FD_t *file, char *name, unsigned char *buf) -
      -
      Function: static herr_t sb_decode (H5FD_t *file, const char *name, const unsigned char *buf) -
      - -

      -

-The sb_size function returns the number of bytes necessary to encode information needed later if the file is reopened. The sb_encode function encodes information from the file into the buffer buf allocated by the caller. It also writes an 8-character string (plus null termination) into the name argument, which should be a unique identification for the driver. The sb_decode function examines the name, decodes data from the buffer buf, and updates the file argument with the new information. -

      - -

      -

      -The part of this which is somewhat tricky is that the file must be readable -before the superblock information is decoded. File access modes fall outside -the scope of the HDF5 file format, but they are placed inside the boot block -for convenience.(8) - -

      -

      -Example: To be written later. - -
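-
-In the absence of that example, the following is a minimal, hypothetical
-sketch of the three superblock callbacks for a family-style driver that
-persists only its member size. It assumes the H5FD_family_t structure shown
-earlier, plus <string.h> and <stdint.h>; the "NCSAfami" driver tag and the
-8-byte big-endian encoding are assumptions, not the actual HDF5
-implementation.
-
-/* Number of bytes this driver stores in the superblock (an 8-byte member size) */
-static hsize_t
-H5FD_family_sb_size(H5FD_t *_file)
-{
-    return 8;
-}
-
-/* Write the driver tag and encode the member size into the caller's buffer */
-static herr_t
-H5FD_family_sb_encode(H5FD_t *_file, char *name, unsigned char *buf)
-{
-    H5FD_family_t *file = (H5FD_family_t *)_file;
-    uint64_t       size = (uint64_t)file->memb_size;
-    int            i;
-
-    strcpy(name, "NCSAfami");                            /* 8 characters plus NUL */
-    for (i = 0; i < 8; i++)
-        buf[i] = (unsigned char)(size >> (8 * (7 - i))); /* big-endian encoding   */
-    return 0;
-}
-
-/* Check the driver tag and decode the member size saved when the file was created */
-static herr_t
-H5FD_family_sb_decode(H5FD_t *_file, const char *name, const unsigned char *buf)
-{
-    H5FD_family_t *file = (H5FD_family_t *)_file;
-    uint64_t       size = 0;
-    int            i;
-
-    if (strncmp(name, "NCSAfami", 8) != 0) return -1;    /* not our superblock data */
-    for (i = 0; i < 8; i++)
-        size = (size << 8) | buf[i];
-    file->memb_size = (hsize_t)size;
-    return 0;
-}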

      - - -

      Address Space Functions

      - -

      -HDF5 does not assume that a file is a linear address space of bytes. Instead, -the library will call functions to allocate and free portions of the HDF5 -format address space, which in turn map onto functions in the file driver to -allocate and free portions of file address space. The library tells the file -driver how much format address space it wants to allocate and the driver -decides what format address to use and how that format address is mapped onto -the file address space. Usually the format address is chosen so that the file -address can be calculated in constant time for data I/O operations (which are -always specified by format addresses). - -

      - - - -

      Userblock and Superblock

      - -

-The HDF5 format allows an optional userblock to appear before the actual HDF5 data in such a way that if the userblock is sucked out of the file and everything remaining is shifted downward in the file address space, then the file is still a valid HDF5 file. The userblock size can be zero or any power of two greater than or equal to 512, and the file superblock begins immediately after the userblock. - -

      -

      -HDF5 allocates space for the userblock and superblock by calling an -allocation function defined below, which must return a chunk of memory at -format address zero on the first call. - -

      - - -

      Allocation of Format Regions

      - -

      -The library makes many types of allocation requests: - -

      -
      - -
      H5FD_MEM_SUPER -
      -An allocation request for the userblock and/or superblock. -
      H5FD_MEM_BTREE -
      -An allocation request for a node of a B-tree. -
      H5FD_MEM_DRAW -
      -An allocation request for the raw data of a dataset. -
      H5FD_MEM_META -
      -An allocation request for the raw data of a dataset which -the user has indicated will be relatively small. -
      H5FD_MEM_GROUP -
      -An allocation request for a group leaf node (internal nodes of the group tree -are allocated as H5MF_BTREE). -
      H5FD_MEM_GHEAP -
      -An allocation request for a global heap collection. Global heaps are used to -store certain types of references such as dataset region references. The set -of all global heap collections can become quite large. -
      H5FD_MEM_LHEAP -
      -An allocation request for a local heap. Local heaps are used to store the -names which are members of a group. The combined size of all local heaps is a -function of the number of object names in the file. -
      H5FD_MEM_OHDR -
      -An allocation request for (part of) an object header. Object headers are -relatively small and include meta information about objects (like the data -space and type of a dataset) and attributes. -
      - -

      -When a chunk of memory is freed the library adds it to a free list and -allocation requests are satisfied from the free list before requesting memory -from the file driver. Each type of allocation request enumerated above has its -own free list, but the file driver can specify that certain object types can -share a free list. It does so by providing an array which maps a request type -to a free list. If any value of the map is H5MF_DEFAULT (zero) then the -object's own free list is used. The special value H5MF_NOLIST indicates -that the library should not attempt to maintain a free list for that -particular object type, instead calling the file driver each time an object of -that type is freed. - -

      -

      -Mappings predefined in the `H5FDpublic.h' file are: -

      - -
      H5FD_FLMAP_SINGLE -
      -All memory usage types are mapped to a single free list. -
      H5FD_FLMAP_DICHOTOMY -
      -Memory usage is segregated into meta data and raw data for the purposes of -memory management. -
      H5FD_FLMAP_DEFAULT -
      -Each memory usage type has its own free list. -
      - -

      -Example: To make a map that manages object headers on one free list -and everything else on another free list one might initialize the map with the -following code: (the use of H5FD_MEM_SUPER is arbitrary) - -

      - -
      -H5FD_mem_t mt, map[H5FD_MEM_NTYPES];
      -
      -for (mt=0; mt<H5FD_MEM_NTYPES; mt++) {
      -    map[mt] = (H5FD_MEM_OHDR==mt) ? mt : H5FD_MEM_SUPER;
      -}
      -
      - -

-If an allocation request cannot be satisfied from the free list then one of two things happens. If the driver defines an allocation callback then it is used to allocate space; otherwise new memory is allocated from the end of the format address space by incrementing the end-of-address marker. - -

      -

      -

      -
      Function: static haddr_t alloc (H5FD_t *file, H5MF_type_t type, hsize_t size) -
      - -

      -

-The file argument is the file from which space is to be allocated, type is the type of memory being requested (from the list above) without being mapped according to the freelist map, and size is the number of bytes being requested. The library is allowed to allocate large chunks of storage and manage them in a layer above the file driver (although the current library doesn't do that). The allocation function should return a format address for the first byte allocated. The allocated region extends from that address for size bytes. If the request cannot be honored then the undefined address value is returned (HADDR_UNDEF). The first call to this function for a file which has never had memory allocated must return a format address of zero (or HADDR_UNDEF on failure) since this is how the library allocates space for the userblock and/or superblock. -

      - -

      - -

      -Example: To be written later. - -
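-
-In its absence, here is a minimal sketch of an alloc callback that simply
-extends the end-of-address marker, ignoring the request type and any
-alignment concerns. It assumes the H5FD_family_t structure shown earlier and
-a driver-defined MAXADDR constant; it is not the actual HDF5 implementation.
-
-static haddr_t
-H5FD_family_alloc(H5FD_t *_file, H5FD_mem_t type, hsize_t size)
-{
-    H5FD_family_t *file = (H5FD_family_t *)_file;
-    haddr_t        addr = file->eoa;   /* allocate at the current end of address space */
-
-    if (HADDR_UNDEF == addr || addr + size > MAXADDR)
-        return HADDR_UNDEF;            /* request cannot be honored                    */
-
-    file->eoa = addr + size;           /* bump the end-of-address marker               */
-    return addr;                       /* the first call returns format address zero   */
-}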

      - - -

      Freeing Format Regions

      - -

      -When the library is finished using a certain region of the format address -space it will return the space to the free list according to the type of -memory being freed and the free list map described above. If the free list has -been disabled for a particular memory usage type (according to the free list -map) and the driver defines a free callback then it will be -invoked. The free callback is also invoked for all entries on the free -list when the file is closed. - -

      -

      -

      -
      Function: static herr_t free (H5FD_t *file, H5MF_type_t type, haddr_t addr, hsize_t size) -
      - -

      -

      -The file argument is the file for which space is being freed; type -is the type of object being freed (from the list above) without being mapped -according to the freelist map; addr is the first format address to free; -and size is the size in bytes of the region being freed. The region -being freed may refer to just part of the region originally allocated and/or -may cross allocation boundaries provided all regions being freed have the same -usage type. However, the library will never attempt to free regions which have -already been freed or which have never been allocated. -

      - -

      -

      -A driver may choose to not define the free function, in which case -format addresses will be leaked. This isn't normally a huge problem since the -library contains a simple free list of its own and freeing parts of the format -address space is not a common occurrence. - -

      -

      -Example: To be written later. - -
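-
-In its absence, here is a minimal sketch of a free callback which reclaims
-space only when the freed region ends exactly at the end-of-address marker,
-and otherwise leaks it (which, as noted above, the library tolerates); again
-this assumes the H5FD_family_t structure shown earlier and is not the actual
-HDF5 implementation.
-
-static herr_t
-H5FD_family_free(H5FD_t *_file, H5FD_mem_t type, haddr_t addr, hsize_t size)
-{
-    H5FD_family_t *file = (H5FD_family_t *)_file;
-
-    if (addr + size == file->eoa)
-        file->eoa = addr;   /* freed region is at the very end: shrink the address space */
-
-    /* otherwise do nothing; the format addresses are simply leaked */
-    return 0;
-}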

      - - -

      Querying Address Range

      - -

      -Each file driver must have some mechanism for setting and querying the end of -address, or EOA, marker. The EOA marker is the first format address -after the last format address ever allocated. If the last part of the -allocated address range is freed then the driver may optionally decrease the -eoa marker. - -

      -

      -

      -
      Function: static haddr_t get_eoa (H5FD_t *file) -
      - -

      -

      -This function returns the current value of the EOA marker for the specified -file. -

      - -

      -

      -Example: The sec2 driver just returns the current eoa marker value -which is cached in the file structure: - -

      - -
      -static haddr_t
      -H5FD_sec2_get_eoa(H5FD_t *_file)
      -{
      -    H5FD_sec2_t *file = (H5FD_sec2_t*)_file;
      -    return file->eoa;
      -}
      -
      - -

      -The eoa marker is initially zero when a file is opened and the library may set -it to some other value shortly after the file is opened (after the superblock -is read and the saved eoa marker is determined) or when allocating additional -memory in the absence of an alloc callback (described above). - -

      -

      -Example: The sec2 driver simply caches the eoa marker in the file -structure and does not extend the underlying Unix file. When the file is -flushed or closed then the Unix file size is extended to match the eoa marker. - -

      - -
      -static herr_t
      -H5FD_sec2_set_eoa(H5FD_t *_file, haddr_t addr)
      -{
      -    H5FD_sec2_t *file = (H5FD_sec2_t*)_file;
      -    file->eoa = addr;
      -    return 0;
      -}
      -
      - - - -

      Data Functions

      - -

      -These functions operate on data, transferring a region of the format address -space between memory and files. - -

      - - - -

      Contiguous I/O Functions

      - -

      -A driver must specify two functions to transfer data from the library to the -file and vice versa. - -

      -

      -

      -
      Function: static herr_t read (H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, hsize_t size, void *buf) -
      -
      Function: static herr_t write (H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, hsize_t size, const void *buf) -
      - -

      -

-The read function reads data from file file beginning at address addr and continuing for size bytes into the buffer buf supplied by the caller. The write function transfers data in the opposite direction. Both functions take a data transfer property list dxpl which indicates the fine points of how the data is to be transferred and which comes directly from the H5Dread or H5Dwrite function. Both functions also receive the type of data being transferred, which may allow a driver to tune its behavior for different kinds of data. -

      - -

      -

      -Both functions should return a negative value if they fail to transfer the -requested data, or non-negative if they succeed. The library will never -attempt to read from unallocated regions of the format address space. - -

      -

      -Example: The sec2 driver just makes system calls. It tries not to -call lseek if the current operation is the same as the previous -operation and the file position is correct. It also fills the output buffer -with zeros when reading between the current EOF and EOA markers and restarts -system calls which were interrupted. - -

      - -
      -static herr_t
      -H5FD_sec2_read(H5FD_t *_file, H5FD_mem_t type/*unused*/, hid_t dxpl_id/*unused*/,
      -        haddr_t addr, hsize_t size, void *buf/*out*/)
      -{
      -    H5FD_sec2_t         *file = (H5FD_sec2_t*)_file;
      -    ssize_t             nbytes;
      -    
      -    assert(file && file->pub.cls);
      -    assert(buf);
      -
      -    /* Check for overflow conditions */
      -    if (REGION_OVERFLOW(addr, size)) return -1;
      -    if (addr+size>file->eoa) return -1;
      -
      -    /* Seek to the correct location */
      -    if ((addr!=file->pos || OP_READ!=file->op) &&
      -        file_seek(file->fd, (file_offset_t)addr, SEEK_SET)<0) {
      -        file->pos = HADDR_UNDEF;
      -        file->op = OP_UNKNOWN;
      -        return -1;
      -    }
      -
      -    /*
      -     * Read data, being careful of interrupted system calls, partial results,
      -     * and the end of the file.
      -     */
      -    while (size>0) {
      -        do nbytes = read(file->fd, buf, size);
      -        while (-1==nbytes && EINTR==errno);
      -        if (-1==nbytes) {
      -            /* error */
      -            file->pos = HADDR_UNDEF;
      -            file->op = OP_UNKNOWN;
      -            return -1;
      -        }
      -        if (0==nbytes) {
      -            /* end of file but not end of format address space */
      -            memset(buf, 0, size);
      -            size = 0;
      -        }
      -        assert(nbytes>=0);
      -        assert((hsize_t)nbytes<=size);
      -        size -= (hsize_t)nbytes;
      -        addr += (haddr_t)nbytes;
      -        buf = (char*)buf + nbytes;
      -    }
      -    
      -    /* Update current position */
      -    file->pos = addr;
      -    file->op = OP_READ;
      -    return 0;
      -}
      -
      - -

      -Example: The sec2 write callback is similar except it updates -the file EOF marker when extending the file. - -
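-
-That callback is not reproduced here; the following is a hedged sketch of
-what a sec2-style write might look like, mirroring the read callback above
-and updating the cached EOF marker when the file is extended (not the actual
-HDF5 implementation).
-
-static herr_t
-H5FD_sec2_write(H5FD_t *_file, H5FD_mem_t type/*unused*/, hid_t dxpl_id/*unused*/,
-        haddr_t addr, hsize_t size, const void *buf)
-{
-    H5FD_sec2_t         *file = (H5FD_sec2_t*)_file;
-    ssize_t             nbytes;
-
-    assert(file && file->pub.cls);
-    assert(buf);
-
-    /* Check for overflow conditions */
-    if (REGION_OVERFLOW(addr, size)) return -1;
-    if (addr+size>file->eoa) return -1;
-
-    /* Seek to the correct location */
-    if ((addr!=file->pos || OP_WRITE!=file->op) &&
-        file_seek(file->fd, (file_offset_t)addr, SEEK_SET)<0) {
-        file->pos = HADDR_UNDEF;
-        file->op = OP_UNKNOWN;
-        return -1;
-    }
-
-    /* Write data, restarting interrupted system calls and handling short writes */
-    while (size>0) {
-        do nbytes = write(file->fd, buf, size);
-        while (-1==nbytes && EINTR==errno);
-        if (nbytes<=0) {
-            file->pos = HADDR_UNDEF;
-            file->op = OP_UNKNOWN;
-            return -1;
-        }
-        size -= (hsize_t)nbytes;
-        addr += (haddr_t)nbytes;
-        buf = (const char*)buf + nbytes;
-    }
-
-    /* Update the current position and extend the cached EOF if the file grew */
-    file->pos = addr;
-    file->op = OP_WRITE;
-    if (file->pos>file->eof) file->eof = file->pos;
-    return 0;
-}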

      - - -

      Flushing Cached Data

      - -

-Some drivers may desire to cache data in memory in order to make larger I/O requests to the underlying file and thus improve bandwidth. Such drivers should register a cache flushing function so that the library can ensure that data has been flushed out of the driver in response to the application calling H5Fflush. - -

      -

      -

      -
      Function: static herr_t flush (H5FD_t *file) -
      - -

      -

      -Flush all data for file file to storage. -

      - -

      -

-Example: The sec2 driver doesn't cache any data but it also doesn't extend the Unix file as aggressively as it should. Therefore, when finalizing a file it should write a zero to the last byte of the allocated region so that when reopening the file later the EOF marker will be at least as large as the EOA marker saved in the superblock (otherwise HDF5 will refuse to open the file, claiming that the data appears to be truncated). - -

      - -
      -static herr_t
      -H5FD_sec2_flush(H5FD_t *_file)
      -{
      -    H5FD_sec2_t *file = (H5FD_sec2_t*)_file;
      -
      -    if (file->eoa>file->eof) {
      -        if (-1==file_seek(file->fd, file->eoa-1, SEEK_SET)) return -1;
      -        if (write(file->fd, "", 1)!=1) return -1;
      -        file->eof = file->eoa;
      -        file->pos = file->eoa;
      -        file->op = OP_WRITE;
      -    }
      -
      -    return 0;
      -}
      -
      - - - -

      Optimization Functions

      - -

      -The library is capable of performing several generic optimizations on I/O, but -these types of optimizations may not be appropriate for a given VFL driver. -

      - -

      -Each driver may provide a query function to allow the library to query whether -to enable these optimizations. If a driver lacks a query function, the library -will disable all types of optimizations which can be queried. -

      - -

      -

      -
      Function: static herr_t query (const H5FD_t *file, unsigned long *flags) -
      -

      -

      -This function is called by the library to query which optimizations to enable -for I/O to this driver. These are the flags which are currently defined: - -

        -
        -
        H5FD_FEAT_AGGREGATE_METADATA (0x00000001) -
        Defining the H5FD_FEAT_AGGREGATE_METADATA for a VFL driver means that -the library will attempt to allocate a larger block for metadata and -then sub-allocate each metadata request from that larger block. -
        H5FD_FEAT_ACCUMULATE_METADATA (0x00000002) -
        Defining the H5FD_FEAT_ACCUMULATE_METADATA for a VFL driver means that -the library will attempt to cache metadata as it is written to the file -and build up a larger block of metadata to eventually pass to the VFL -'write' routine. -
        H5FD_FEAT_DATA_SIEVE (0x00000004) -
        Defining the H5FD_FEAT_DATA_SIEVE for a VFL driver means that -the library will attempt to cache raw data as it is read from/written to -a file in a "data sieve" buffer. See Rajeev Thakur's papers: -
          -
          -
          http://www.mcs.anl.gov/~thakur/papers/romio-coll.ps.gz -
          http://www.mcs.anl.gov/~thakur/papers/mpio-high-perf.ps.gz -
          -
        -
        -
      -

      - -
      -
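-
-A minimal sketch of a query callback for a sec2-style driver follows; the
-particular set of flags returned is illustrative, and a real driver would
-advertise whichever optimizations it can actually support.
-
-static herr_t
-H5FD_sec2_query(const H5FD_t *_file, unsigned long *flags)
-{
-    if (flags) {
-        *flags = 0;
-        *flags |= H5FD_FEAT_AGGREGATE_METADATA;  /* OK to aggregate metadata allocations */
-        *flags |= H5FD_FEAT_ACCUMULATE_METADATA; /* OK to accumulate metadata for writes */
-        *flags |= H5FD_FEAT_DATA_SIEVE;          /* OK to perform data sieving for I/O   */
-    }
-    return 0;
-}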

      - -

      Registration of a Driver

      - -

      -Before a driver can be used the HDF5 library needs to be told of its -existence. This is done by registering the driver, which results in a driver -identification number. Instead of passing many arguments to the registration -function, the driver information is entered into a structure and the address -of the structure is passed to the registration function where it is -copied. This allows the HDF5 API to be extended while providing backward -compatibility at the source level. - -

      -

      -

      -
      Function: hid_t H5FDregister (H5FD_class_t *cls) -
      - -

      -

      -The driver described by struct cls is registered with the library and an -ID number for the driver is returned. -

      - -

      -

      -The H5FD_class_t type is a struct with the following fields: - -

      -
      - -
      const char *name -
      -A pointer to a constant, null-terminated driver name to be used for debugging -purposes. -
      size_t fapl_size -
      -The size in bytes of the file access mode structure or zero if the driver -supplies a copy function or doesn't define the structure. -
      void *(*fapl_copy)(const void *fapl) -
-An optional function which copies a driver-defined file access mode structure. This field takes precedence over fapl_size when both are defined. -
      void (*fapl_free)(void *fapl) -
      -An optional function to free the driver-defined file access mode structure. If -null, then the library calls the C free function to free the -structure. -
      size_t dxpl_size -
      -The size in bytes of the data transfer mode structure or zero if the driver -supplies a copy function or doesn't define the structure. -
      void *(*dxpl_copy)(const void *dxpl) -
-An optional function which copies a driver-defined data transfer mode structure. This field takes precedence over dxpl_size when both are defined. -
      void (*dxpl_free)(void *dxpl) -
      -An optional function to free the driver-defined data transfer mode -structure. If null, then the library calls the C free function to -free the structure. -
      H5FD_t *(*open)(const char *name, unsigned flags, hid_t fapl, haddr_t maxaddr) -
      -The function which opens or creates a new file. -
      herr_t (*close)(H5FD_t *file) -
      -The function which ends access to a file. -
      int (*cmp)(const H5FD_t *f1, const H5FD_t *f2) -
      -An optional function to determine whether two open files have the same key. If -this function is not present then the library assumes that two files will -never be the same. -
      int (*query)(const H5FD_t *f, unsigned long *flags) -
      -An optional function to determine which library optimizations a driver can -support. -
      haddr_t (*alloc)(H5FD_t *file, H5FD_mem_t type, hsize_t size) -
      -An optional function to allocate space in the file. -
      herr_t (*free)(H5FD_t *file, H5FD_mem_t type, haddr_t addr, hsize_t size) -
      -An optional function to free space in the file. -
      haddr_t (*get_eoa)(H5FD_t *file) -
      -A function to query how much of the format address space has been allocated. -
      herr_t (*set_eoa)(H5FD_t *file, haddr_t) -
      -A function to set the end of address space. -
      haddr_t (*get_eof)(H5FD_t *file) -
      -A function to return the current end-of-file marker value. -
      herr_t (*read)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, hsize_t size, void *buffer) -
      -A function to read data from a file. -
      herr_t (*write)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, hsize_t size, const void *buffer) -
      -A function to write data to a file. -
      herr_t (*flush)(H5FD_t *file) -
      -A function which flushes cached data to the file. -
      H5FD_mem_t fl_map[H5FD_MEM_NTYPES] -
      -An array which maps a file allocation request type to a free list. -
      - -

      -Example: The sec2 driver would be registered as: - -

      - -
      -static const H5FD_class_t H5FD_sec2_g = {
      -    "sec2",                                     /*name                  */
      -    MAXADDR,                                    /*maxaddr               */
      -    NULL,                                       /*sb_size               */
      -    NULL,                                       /*sb_encode             */
      -    NULL,                                       /*sb_decode             */
      -    0,                                          /*fapl_size             */
      -    NULL,                                       /*fapl_get              */
      -    NULL,                                       /*fapl_copy             */
      -    NULL,                                       /*fapl_free             */
      -    0,                                          /*dxpl_size             */
      -    NULL,                                       /*dxpl_copy             */
      -    NULL,                                       /*dxpl_free             */
      -    H5FD_sec2_open,                             /*open                  */
      -    H5FD_sec2_close,                            /*close                 */
      -    H5FD_sec2_cmp,                              /*cmp                   */
      -    H5FD_sec2_query,                            /*query                 */
      -    NULL,                                       /*alloc                 */
      -    NULL,                                       /*free                  */
      -    H5FD_sec2_get_eoa,                          /*get_eoa               */
      -    H5FD_sec2_set_eoa,                          /*set_eoa               */
      -    H5FD_sec2_get_eof,                          /*get_eof               */
      -    H5FD_sec2_read,                             /*read                  */
      -    H5FD_sec2_write,                            /*write                 */
      -    H5FD_sec2_flush,                            /*flush                 */
      -    H5FD_FLMAP_SINGLE,                          /*fl_map                */
      -};
      -
      -hid_t
      -H5FD_sec2_init(void)
      -{
      -    if (!H5FD_SEC2_g) {
      -        H5FD_SEC2_g = H5FDregister(&H5FD_sec2_g);
      -    }
      -    return H5FD_SEC2_g;
      -}
      -
      - -

      -A driver can be removed from the library by unregistering it - -

      -

      -

      -
Function: herr_t H5FDunregister (hid_t driver) -
      -Where driver is the ID number returned when the driver was registered. -
      - -

      -

      -Unregistering a driver makes it unusable for creating new file access or data -transfer property lists but doesn't affect any property lists or files that -already use that driver. - -

      - - - -

      Querying Driver Information

      - -

      -

      -
      Function: void * H5Pget_driver_data (hid_t fapl) -
      -
      Function: void * H5Pget_driver_data (hid_t fxpl) -
      - -

      -

      -This function is intended to be used by driver functions, not applications. -It returns a pointer directly into the file access property list -fapl which is a copy of the driver's file access mode originally -provided to the H5Pset_driver function. If its argument is a data -transfer property list fxpl then it returns a pointer to the -driver-specific data transfer information instead. -
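-
-A hypothetical sketch of how a driver's open callback might retrieve its
-driver-specific file access information with this call (the function is
-listed elsewhere as H5Pget_driver_info; the fallback to built-in defaults is
-an assumption, not library-mandated behavior):
-
-static H5FD_t *
-H5FD_family_open(const char *name, unsigned flags, hid_t fapl, haddr_t maxaddr)
-{
-    H5FD_family_fapl_t        defaults = {100*1024*1024, H5P_DEFAULT}; /* 100MB members */
-    const H5FD_family_fapl_t  *fa = H5Pget_driver_data(fapl);
-
-    if (NULL == fa)
-        fa = &defaults;      /* no driver data was set: fall back to defaults */
-
-    /* ... use fa->memb_size and fa->memb_fapl_id to open the member files ... */
-    return NULL;             /* remainder of the open logic elided */
-}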

      - -

      - - -

      Miscellaneous

      - -

      -The various private H5F_low_* functions will be replaced by public -H5FD* functions so they can be called from drivers. - -

      -

      -All private functions H5F_addr_* which operate on addresses will be -renamed as public functions by removing the first underscore so they can be -called by drivers. - -

      -

      -The haddr_t address data type will be passed by value throughout the -library. The original intent was that this type would eventually be a union of -file address types for the various drivers and may become quite large, but -that was back when drivers were part of HDF5. It will become an alias for an -unsigned integer type (32 or 64 bits depending on how the library was -configured). - -

      -

      -The various H5F*.c driver files will be renamed H5FD*.c and each -will have a corresponding header file. All driver functions except the -initializer and API will be declared static. - -

      -

      -This documentation didn't cover optimization functions which would be useful -to drivers like MPI-IO. Some drivers may be able to perform data pipeline -operations more efficiently than HDF5 and need to be given a chance to -override those parts of the pipeline. The pipeline would be designed to call -various H5FD optimization functions at various points which return one of -three values: the operation is not implemented by the driver, the operation is -implemented but failed in a non-recoverable manner, the operation is -implemented and succeeded. - -

      -

-Various parts of HDF5 check only the top-level file driver and do something special if it is the MPI-IO driver. However, we might want to be able to put the MPI-IO driver under other drivers such as the raw part of a split driver or under a debug driver whose sole purpose is to accumulate statistics as it passes all requests through to the MPI-IO driver. Therefore we will probably need a function which takes a format address and/or object type and returns the driver which would have been used at the lowest level to process the request. - -

      - -


      -

      Footnotes

      -

      (1)

      -

The driver name follows a convention and might -not apply to drivers which are not distributed with HDF5. -

      (2)

      -

      The access method also indicates how to translate -the storage name to a storage server such as a file, network protocol, or -memory. -

      (3)

      -

      The term -"file access property list" is a misnomer since storage isn't -required to be a file. -

      (4)

      -

      This -function is overloaded to operate on data transfer property lists also, as -described below. -

      (5)

      -

      Read-only access is only appropriate when opening an existing -file. -

      (6)

      -

      For instance, writing data to one handle will cause -the data to be immediately visible on the other handle. -

      (7)

      -

      The ordering is -arbitrary as long as it's consistent within a particular file driver. -

      (8)

      -

      File access modes do not describe data, but rather -describe how the HDF5 format address space is mapped to the underlying -file(s). Thus, in general the mapping must be known before the file superblock -can be read. However, the user usually knows enough about the mapping for the -superblock to be readable and once the superblock is read the library can fill -in the missing parts of the mapping. -


      -This document was generated on 18 November 1999 using the -texi2html -translator version 1.51.

      -

      -Updated on 10/24/00 by hand, Quincey Koziol -

      - - diff --git a/doc/html/TechNotes/VFLfunc.html b/doc/html/TechNotes/VFLfunc.html deleted file mode 100644 index 1e33593..0000000 --- a/doc/html/TechNotes/VFLfunc.html +++ /dev/null @@ -1,64 +0,0 @@ - - -VFL Functions - - - - -

      List of HDF5 VFL Functions

      - -
      -The following functions support the HDF5 virtual file layer (VFL), enabling 
      -the creation of customized I/O drivers.  
      -
      -At this time, these functions are documented only in the HDF5 Virtual File 
      -Layer design document and in the source code.
      -
      -
      -
      -herr_t H5Pset_driver(hid_t plist_id, hid_t driver_id,
      -                const void *driver_info)
      -
      -void *H5Pget_driver_info(hid_t plist_id)
      -
      -hid_t H5FDregister(const H5FD_class_t *cls);
      -
      -herr_t H5FDunregister(hid_t driver_id);
      -
      -H5FD_t *H5FDopen(const char *name, unsigned flags, hid_t fapl_id,
      -		haddr_t maxaddr);
      -
      -herr_t H5FDclose(H5FD_t *file);
      -
      -int H5FDcmp(const H5FD_t *f1, const H5FD_t *f2);
      -
      -int H5FDquery(const H5FD_t *f, unsigned long *flags);
      -
      -haddr_t H5FDalloc(H5FD_t *file, H5FD_mem_t type, hsize_t size);
      -
      -herr_t H5FDfree(H5FD_t *file, H5FD_mem_t type, haddr_t addr, hsize_t size);
      -
      -haddr_t H5FDrealloc(H5FD_t *file, H5FD_mem_t type, haddr_t addr,
      -		hsize_t old_size, hsize_t new_size);
      -
      -haddr_t H5FDget_eoa(H5FD_t *file);
      -
      -herr_t H5FDset_eoa(H5FD_t *file, haddr_t eof);
      -
      -haddr_t H5FDget_eof(H5FD_t *file);
      -
      -herr_t H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, 
      -		size_t size, void *buf/*out*/);
      -
      -herr_t H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, 
      -		haddr_t addr, size_t size, const void *buf);
      -
      -herr_t H5FDflush(H5FD_t *file, unsigned closing);
      -
      -===========================================
      -Last modified:  25 June 2002
      -HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
      -
      -
      - - diff --git a/doc/html/TechNotes/VLTypes.html b/doc/html/TechNotes/VLTypes.html deleted file mode 100644 index 8a41c10..0000000 --- a/doc/html/TechNotes/VLTypes.html +++ /dev/null @@ -1,150 +0,0 @@ - - - - Variable-Length Datatypes in HDF5 - - - - - - - - - - - -

      Introduction

      -

Variable-length (VL) datatypes have a great deal of flexibility, but can - be over- or mis-used. VL datatypes are ideal for capturing the notion - that elements in an HDF5 dataset (or attribute) can have different - amounts of information (VL strings are the canonical example), - but they have some drawbacks that this document attempts - to address. -

      - -

      Background

      -

      Because fast random access to dataset elements requires that each - element be a fixed size, the information stored for VL datatype elements - is actually information to locate the VL information, not - the information itself. -

      - -

      When to use VL datatypes

      -

VL datatypes are designed to allow the amount of data stored in each - element of a dataset to vary. This change could be - over time, as new values with different lengths are written to the - element. Or, the change can be over "space" - the dataset's space, - with each element in the dataset having the same fundamental type, but - different lengths. "Ragged arrays" are the classic example of elements - that change over the "space" of the dataset. If the elements of a - dataset are not going to change over "space" or time, a VL datatype - should probably not be used. -
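A brief sketch of creating such a "ragged" dataset with a VL datatype follows. The file handle, dataset name, and extent are placeholders, and error checking is omitted.

#include "hdf5.h"

/* Sketch: a 1-D dataset of 10 elements, each element a variable-length
 * sequence of native ints ("ragged rows"). */
void make_ragged_dataset(hid_t file)
{
    hsize_t dim    = 10;
    hid_t   vltype = H5Tvlen_create(H5T_NATIVE_INT);
    hid_t   space  = H5Screate_simple(1, &dim, NULL);
    hid_t   dset   = H5Dcreate(file, "ragged", vltype, space, H5P_DEFAULT);

    /* Each element written later (as an hvl_t) may have a different length. */

    H5Dclose(dset);
    H5Sclose(space);
    H5Tclose(vltype);
}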

      - -

      Access Time Penalty

      -

      Accessing VL information requires reading the element in the file, then - using that element's location information to retrieve the VL - information itself. - In the worst case, this obviously doubles the number of disk accesses - required to access the VL information. -

      -

      However, in order to avoid this extra disk access overhead, the HDF5 - library groups VL information together into larger blocks on disk and - performs I/O only on those larger blocks. Additionally, these blocks of - information are cached in memory as long as possible. For most access - patterns, this amortizes the extra disk accesses over enough pieces of - VL information to hide the extra overhead involved. -

      - -

      Storage Space Penalty

      -

      Because VL information must be located and retrieved from another - location in the file, extra information must be stored in the file to - locate - each item of VL information (i.e. each element in a dataset or each - VL field in a compound datatype, etc.). - Currently, that extra information amounts to 32 bytes per VL item. -

      -

- With some judicious re-architecting of the library and file format, - this could be reduced to 18 bytes per VL item with no loss in - functionality or additional time penalties. With some additional - effort, the space could perhaps be pushed down as low as 8-10 - bytes per VL item with no loss in functionality, but potentially a - small time penalty. -

      - -

      Chunking and Filters

      -

Storing data as VL information has some effects on chunked storage and - the filters that can be applied to chunked data. Because the data that - is stored in each chunk is the location to access the VL information, - the actual VL information is not broken up into chunks in the same way - as other data stored in chunks. Additionally, because the - actual VL information is not stored in the chunk, any filters which - operate on a chunk will operate on the information to - locate the VL information, not the VL information itself. -

      - -

      File Drivers

      -

Because the parallel I/O file drivers (MPI-I/O and MPI-posix) don't - allow objects with varying sizes to be created in the file, attempting - to create - a dataset or attribute with a VL datatype in a file managed by those - drivers will cause the creation call to fail. -

      -

      Additionally, using - VL datatypes and the 'multi' and 'split' file drivers may not operate - in the manner desired. The HDF5 library currently categorizes the - "blocks of VL information" stored in the file as a type of metadata, - which means that they may not be stored with the other raw data for - the file. -

      - -

      Rewriting

      -

When VL information in the file is re-written, the old VL information - must be released, space for the new VL information must be allocated, and - the new VL information must be written to the file. This may cause - additional I/O accesses. -

      - - - - - diff --git a/doc/html/TechNotes/Version.html b/doc/html/TechNotes/Version.html deleted file mode 100644 index 0e0853b..0000000 --- a/doc/html/TechNotes/Version.html +++ /dev/null @@ -1,137 +0,0 @@ - - - - Version Numbers - - - -

      HDF5 Release Version Numbers

      - -

      1. Introduction

      - -

      The HDF5 version number is a set of three integer values - written as either hdf5-1.2.3 or hdf5 version - 1.2 release 3. - -

      The 5 is part of the library name and will only - change if the entire file format and library are redesigned - similar in scope to the changes between HDF4 and HDF5. - -

      The 1 is the major version number and - changes when there is an extensive change to the file format or - library API. Such a change will likely require files to be - translated and applications to be modified. This number is not - expected to change frequently. - -

The 2 is the minor version number and is - incremented by each public release that presents new features. - Even numbers are reserved for stable public versions of the - library while odd numbers are reserved for development - versions. See the diagram below for examples. - -

      The 3 is the release number. For public - versions of the library, the release number is incremented each - time a bug is fixed and the fix is made available to the public. - For development versions, the release number is incremented more - often (perhaps almost daily). - -

      2. Abbreviated Versions

      - -

      It's often convenient to drop the release number when referring - to a version of the library, like saying version 1.2 of HDF5. - The release number can be any value in this case. - -

      3. Special Versions

      - -

Version 1.0.0 was released for alpha testing the first week of - March, 1998. The development version number was incremented to - 1.0.1 and remained constant until the last week of April, - when the release number started to increase and development - versions were made available to people outside the core HDF5 - development team. - -

      Version 1.0.23 was released mid-July as a second alpha - version. - -

      Version 1.1.0 will be the first official beta release but the - 1.1 branch will also serve as a development branch since we're - not concerned about providing bug fixes separate from normal - development for the beta version. - -

      After the beta release we rolled back the version number so the - first release is version 1.0 and development will continue on - version 1.1. We felt that an initial version of 1.0 was more - important than continuing to increment the pre-release version - numbers. - -

      4. Public versus Development

      - -

      The motivation for separate public and development versions is - that the public version will receive only bug fixes while the - development version will receive new features. This also allows - us to release bug fixes expediently without waiting for the - development version to reach a stable state. - -

      Eventually, the development version will near completion and a - new development branch will fork while the original one enters a - feature freeze state. When the original development branch is - ready for release the minor version number will be incremented - to an even value. - -

      -

      - Version Example -
      Fig 1: Version Example -
      - -

      5. Version Support from the Library

      - -

      The library provides a set of macros and functions to query and - check version numbers. - -

      -
      H5_VERS_MAJOR -
      H5_VERS_MINOR -
      H5_VERS_RELEASE -
      These preprocessor constants are defined in the public - include file and determine the version of the include files. - -

      -
      herr_t H5get_libversion (unsigned *majnum, unsigned - *minnum, unsigned *relnum) -
      This function returns through its arguments the version - numbers for the library to which the application is linked. - -

      -
      void H5check(void) -
      This is a macro that verifies that the version number of the - HDF5 include file used to compile the application matches the - version number of the library to which the application is - linked. This check occurs automatically when the first HDF5 - file is created or opened and is important because a mismatch - between the include files and the library is likely to result - in corrupted data and/or segmentation faults. If a mismatch - is detected the library issues an error message on the - standard error stream and aborts with a core dump. - -

      -
      herr_t H5check_version (unsigned majnum, - unsigned minnum, unsigned relnum) -
      This function is called by the H5check() macro - with the include file version constants. The function - compares its arguments to the result returned by - H5get_libversion() and if a mismatch is detected prints - an error message on the standard error stream and aborts. -
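A small sketch of using these calls is shown below; the casts only keep the printf format portable, and error checking is omitted.

#include <stdio.h>
#include "hdf5.h"

/* Sketch: report the header version used at compile time and the
 * library version linked at run time. */
void report_versions(void)
{
    unsigned maj, min, rel;

    H5get_libversion(&maj, &min, &rel);
    printf("headers: %u.%u.%u  library: %u.%u.%u\n",
           (unsigned)H5_VERS_MAJOR, (unsigned)H5_VERS_MINOR,
           (unsigned)H5_VERS_RELEASE, maj, min, rel);

    /* H5check() performs the equivalent comparison automatically when the
       first HDF5 file is created or opened and aborts on a mismatch. */
}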
      - -
      -
      HDF Help Desk
      -
      - - - -Last modified: Fri Oct 30 10:32:50 EST 1998 - - - - diff --git a/doc/html/TechNotes/openmp-hdf5.c b/doc/html/TechNotes/openmp-hdf5.c deleted file mode 100644 index 6d61c38..0000000 --- a/doc/html/TechNotes/openmp-hdf5.c +++ /dev/null @@ -1,403 +0,0 @@ -Appendix A: OpenMP-HDF5 Programs -------------------------------------------------------------------------- - Program 1 -------------------------------------------------------------------------- -/* - * This example writes 64 datasets to a HDF5 file, using multiple threads - * (OpenMP). Each thread grab the lock while it tries to call HDF5 functions - * to write out dataset. In this way, the HDF5 calls are serialized, while - * the calculation part is in parallel. This is one of the ways to do - * OpenMP computation with HDF. As long as not to do HDF I/O in parallel, - * it is safe to use HDF. - */ - -#include -#include -#include - -#define NUM_THREADS 4 -#define NUM_MDSET 16 -#define FILE "SDS.h5" -#define NX 5 /* dataset dimensions */ -#define NY 18 -#define RANK 2 - -void CalcWriteData(hid_t, hid_t, hid_t); - -/*Global variable, OpenMP lock*/ -omp_lock_t lock; - - -int -main (void) -{ - hid_t fid; /* file and dataset handles */ - hid_t datatype, dataspace; /* handles */ - hsize_t dimsf[2]; /* dataset dimensions */ - herr_t status; - int i; - - /* - * Create a new file using H5F_ACC_TRUNC access, - * default file creation properties, and default file - * access properties. - */ - fid = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* - * Describe the size of the array and create the data space for fixed - * size dataset. - */ - dimsf[0] = NX; - dimsf[1] = NY; - dataspace = H5Screate_simple(RANK, dimsf, NULL); - - /* - * Define datatype for the data in the file. - * We will store little endian INT numbers. - */ - datatype = H5Tcopy(H5T_NATIVE_DOUBLE); - status = H5Tset_order(datatype, H5T_ORDER_LE); - - /*Disable dynamic allocation of threads*/ - omp_set_dynamic(0); - - /*Allocate threads*/ - omp_set_num_threads(NUM_THREADS); - - /*Initialize lock*/ - omp_init_lock(&lock); - - /*Each thread grab one iteration in the for loop and call function - * CaclWriteData*/ - #pragma omp parallel default(shared) - { - #pragma omp for - for(i=0; i -#include - -#define FILE "SDS.h5" -#define DATASETNAME "IntArray" -#define NX 5 /* dataset dimensions */ -#define NY 6 -#define RANK 2 - -int -main (void) -{ - hid_t file, dataset; /* file and dataset handles */ - hid_t datatype, dataspace; /* handles */ - hsize_t dimsf[2]; /* dataset dimensions */ - herr_t status; - int data[NX][NY]; /* data to write */ - int i, j; - - /* - * Create a new file using H5F_ACC_TRUNC access, - * default file creation properties, and default file - * access properties. - */ - file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* - * Describe the size of the array and create the data space for fixed - * size dataset. - */ - dimsf[0] = NX; - dimsf[1] = NY; - dataspace = H5Screate_simple(RANK, dimsf, NULL); - - /* - * Define datatype for the data in the file. - * We will store little endian INT numbers. - */ - datatype = H5Tcopy(H5T_NATIVE_INT); - status = H5Tset_order(datatype, H5T_ORDER_LE); - - /* Disable dynamic allocation of threads. */ - omp_set_dynamic(0); - - /* Allocate 2 threads */ - omp_set_num_threads(2); - - - /* Parallel computation. Let 2 threads handle this nested for loops - * in parallel. Only one data array is computed. 
*/ - #pragma omp parallel default(shared) - { - #pragma omp for - for (j = 0; j < NX; j++) { - #pragma omp parallel shared(j, NY) - { - #pragma omp for - for (i = 0; i < NY; i++) - data[j][i] = i + j; - } - } - } - - /* Write this dataset into HDF file */ - dataset = H5Dcreate(file, DATASETNAME, datatype, dataspace, - H5P_DEFAULT); - H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, data); - H5Dclose(dataset); - - /* - * Close/release resources. - */ - H5Sclose(dataspace); - H5Tclose(datatype); - H5Fclose(file); - - return 0; -} - - - -------------------------------------------------------------------------- - Program 3 -------------------------------------------------------------------------- -/* - * This example create two threads. Each thread writes a dataset to - * the HDF5 file in parallel. This program only works occasionally. - */ - -#include -#include - -#define FILE "SDS.h5" -#define NX 5 /* dataset dimensions */ -#define NY 6 -#define RANK 2 - -void writeData(int, hid_t, hid_t, hid_t, char*); - -int main (void) -{ - hid_t file; /* file and dataset handles */ - hid_t datatype, dataspace; /* handles */ - hsize_t dimsf[2]; /* dataset dimensions */ - herr_t status; - int id; - char dname[2][10] = {"Array1", "Array2"}; - - /* - * Create a new file using H5F_ACC_TRUNC access, - * default file creation properties, and default file - * access properties. - */ - file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* - * Describe the size of the array and create the data space for fixed - * size dataset. - */ - dimsf[0] = NX; - dimsf[1] = NY; - dataspace = H5Screate_simple(RANK, dimsf, NULL); - - /* - * Define datatype for the data in the file. - * We will store little endian INT numbers. - */ - datatype = H5Tcopy(H5T_NATIVE_INT); - status = H5Tset_order(datatype, H5T_ORDER_LE); - - /*Disable dynamic allocation of threads*/ - omp_set_dynamic(0); - - /*Allocate 2 threads*/ - omp_set_num_threads(2); - - /*Parallel Part: each thread call function writeData; id is private to - * thread while others are shared */ - #pragma omp parallel shared(file, dataspace, datatype, dname) private(id) - { - id = omp_get_thread_num(); - writeData(id, file, dataspace, datatype, dname[id]); - } - - - /* - * Close/release resources. - */ - H5Sclose(dataspace); - H5Tclose(datatype); - H5Fclose(file); - - return 0; -} - - -/*Each thread call this function to write a dataset into HDF5 file*/ - -void writeData(int id, hid_t file, hid_t dataspace, hid_t datatype, char *dname) -{ - int data[NX][NY]; - hid_t dataset; - int i, j; - - for (j = 0; j < NX; j++) { - for (i = 0; i < NY; i++) - data[j][i] = i + j + id; - } - - dataset = H5Dcreate(file, dname, datatype, dataspace, - H5P_DEFAULT); - H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, data); - H5Dclose(dataset); -} - - -------------------------------------------------------------------------- - Program 4 -------------------------------------------------------------------------- -/* - * This example compute and write two datasets into HDF file in - * parallel. It also only works occasionally. 
- */ - -#include -#include - -#define FILE "SDS.h5" -#define DATASETNAME "IntArray" -#define NX 5 /* dataset dimensions */ -#define NY 6 -#define RANK 2 - -int -main (void) -{ - hid_t file, dataset; /* file and dataset handles */ - hid_t datatype, dataspace; /* handles */ - hsize_t dimsf[2]; /* dataset dimensions */ - herr_t status; - int data[NX][NY]; /* data to write */ - int i, j, id; - char dname[2][10] = {"intArray1", "intArray2"}; - - /* - * Create a new file using H5F_ACC_TRUNC access, - * default file creation properties, and default file - * access properties. - */ - file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* - * Describe the size of the array and create the data space for fixed - * size dataset. - */ - dimsf[0] = NX; - dimsf[1] = NY; - dataspace = H5Screate_simple(RANK, dimsf, NULL); - - /* - * Define datatype for the data in the file. - * We will store little endian INT numbers. - */ - datatype = H5Tcopy(H5T_NATIVE_INT); - status = H5Tset_order(datatype, H5T_ORDER_LE); - - omp_set_dynamic(0); - omp_set_num_threads(2); - - - /* This part of program compute and write two datasets in parallel. */ - #pragma omp parallel shared(file, datatype, dataspace, dname) private(id, j, i, data, dataset) - { - id = omp_get_thread_num(); - for (j = 0; j < NX; j++) { - for (i = 0; i < NY; i++) - data[j][i] = i + j + id; - } - - dataset = H5Dcreate(file, dname[id], datatype, dataspace, - H5P_DEFAULT); - H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, data); - H5Dclose(dataset); - } - - - /* - * Close/release resources. - */ - H5Sclose(dataspace); - H5Tclose(datatype); - H5Fclose(file); - - return 0; -} - diff --git a/doc/html/TechNotes/openmp-hdf5.html b/doc/html/TechNotes/openmp-hdf5.html deleted file mode 100644 index ff13b81..0000000 --- a/doc/html/TechNotes/openmp-hdf5.html +++ /dev/null @@ -1,67 +0,0 @@ -
      -		    Using HDF5 with OpenMP
      -		    ----------------------
      -
      -
      -1. Introduction to OpenMP
      --------------------------
      -
      -    - For shared-memory parallelism
      -    - A combination of library and directives
      -    - Available for C/C++ and Fortran
      -    - SGI leading effort
      -    - Information at http://www.openmp.org and 
      -      http://www.sgi.com/software/openmp
      -
-2. Programming (SGI MIPSpro compiler and C language)
      ----------------------------------------------------
      -
      -    - Turn on compiler '-mp' option
      -    - Include 'omp.h' library in program
      -    - Use library functions, directives and environment variables
      -
      -
      -3. Sample Programs
      -------------------
      -
-Appendix A contains four OpenMP-HDF5 test programs.  (They are derived from
-the hdf5/examples/h5_write.c.)  The purpose of these programs is to
-test OpenMP parallelism with the HDF5 library.
-
-All tests were run on modi4 with the SGI MIPSpro compiler (cc) and make.
-Program 1 and Program 2 are the working programs.  Program 3 and Program 4
-work only occasionally due to race conditions.
-Follow these steps to try the programs.
      -
      -  a.  have your hdf5 library compiled, 
      -  b.  go to hdf5/examples directory,
-  c.  add the -mp option to the end of the CFLAGS list in the Makefile.  If you 
      -      have the compiled program in another directory, you should go to the 
      -      examples in that directory.
-  d.  modify the hdf5/examples/h5_write.c according to the programs attached 
      -      here.  
      -  e.  use hdf5/tools/h5dump to examine the output file.
      -
      -
      -4. Conclusion
      --------------
      -
      -It is not safe to invoke HDF5 library calls via multiple threads in an
      -OpenMP program.  But if one serializes HDF5 calls as illustrated in Program 1,
      -the HDF5 library works correctly with the OpenMP programs.
      -
-Serializing the HDF5 calls slows down the OpenMP program unnecessarily.
-Further study is needed to find ways to "un-serialize" the HDF5 calls.
-One possibility is the beta version of the thread-safe HDF5 library,
-although it currently targets the Pthreads environment.  One could
-investigate the feasibility of running OpenMP programs against that
-thread-safe version of the library.
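
For reference, the locking pattern of Program 1 boils down to the following sketch. The dataset names and array sizes are placeholders, and it assumes the caller has already created the file, datatype, and dataspace, as Program 1 does.

#include <stdio.h>
#include <omp.h>
#include "hdf5.h"

/* Sketch: compute in parallel, but guard every HDF5 call with one
 * OpenMP lock so the library is entered by one thread at a time. */
void write_datasets_serialized(hid_t file, hid_t type, hid_t space)
{
    omp_lock_t lock;
    int        i;

    omp_init_lock(&lock);

    #pragma omp parallel for
    for (i = 0; i < 16; i++) {
        char   name[32];
        double data[5][18];
        int    r, c;

        for (r = 0; r < 5; r++)              /* computation runs in parallel */
            for (c = 0; c < 18; c++)
                data[r][c] = r + c + i;

        sprintf(name, "dataset%d", i);

        omp_set_lock(&lock);                 /* serialize the HDF5 calls */
        {
            hid_t dset = H5Dcreate(file, name, type, space, H5P_DEFAULT);
            H5Dwrite(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL,
                     H5P_DEFAULT, data);
            H5Dclose(dset);
        }
        omp_unset_lock(&lock);
    }

    omp_destroy_lock(&lock);
}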
      -
      -
      -
      -Appendix A: OpenMP-HDF5 Programs
      -
      --------
      -Updated: 2000/11/28
      -Contact: hdfhelp@ncsa.uiuc.edu
      -
      diff --git a/doc/html/TechNotes/pipe1.gif b/doc/html/TechNotes/pipe1.gif deleted file mode 100644 index 3b489a6..0000000 Binary files a/doc/html/TechNotes/pipe1.gif and /dev/null differ diff --git a/doc/html/TechNotes/pipe1.obj b/doc/html/TechNotes/pipe1.obj deleted file mode 100644 index 41f3461..0000000 --- a/doc/html/TechNotes/pipe1.obj +++ /dev/null @@ -1,136 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,0,0,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 160,160,144,224,160,272,176,224,160,160],1,2,1,25,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 848,160,832,224,848,272,864,224,848,160],1,2,1,34,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -box('black',464,192,496,256,26,1,1,39,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 160,224,464,224],1,2,1,40,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 496,224,848,224],1,2,1,41,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 192,224,176,288,192,336,208,288,192,224],1,2,1,42,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 432,224,416,288,432,336,448,288,432,224],1,2,1,43,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 192,288,432,288],1,2,1,44,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',464,352,496,416,26,1,1,45,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 528,224,512,288,528,336,544,288,528,224],1,2,1,46,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 816,224,800,288,816,336,832,288,816,224],1,2,1,47,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 528,288,816,288],1,2,1,48,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 464,256,456,304,464,328,488,304,488,256],1,2,1,62,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 480,352,488,304],2,2,1,85,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ -]). -box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application"]). -text('black',480,144,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5D_read()"]). -text('black',480,128,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Dread()"]). -text('black',304,208,'Helvetica',0,17,1,1,0,1,86,15,115,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',304,192,'Helvetica',0,17,1,1,0,1,99,15,119,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_fgath()"]). -text('black',296,288,'Helvetica',0,17,1,1,0,1,101,15,125,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',296,304,'Helvetica',0,17,1,1,0,1,90,15,132,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',296,320,'Helvetica',0,17,1,1,0,1,98,15,136,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',296,336,'Helvetica',0,17,1,1,0,1,33,15,140,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',664,208,'Helvetica',0,17,1,1,0,1,106,15,146,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). 
-text('black',664,176,'Helvetica',0,17,1,1,0,1,104,15,150,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_mscat()"]). -text('black',664,272,'Helvetica',0,17,1,1,0,1,54,15,154,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',384,392,'Helvetica',0,17,1,1,0,1,105,15,170,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5T_conv_struct()"]). -poly('black',4,[ - 392,384,400,352,440,368,456,336],1,1,1,172,1,0,0,0,8,3,0,0,0,'1','8','3', - "6",[ -]). -text('black',480,176,'Helvetica',0,17,1,1,0,1,44,15,176,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "TCONV"]). -text('black',480,416,'Helvetica',0,17,1,1,0,1,25,15,182,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "BKG"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,380,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 1: Internal Contiguous Storage"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',160,208,'Helvetica',0,17,1,1,0,1,8,15,207,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). -text('black',192,272,'Helvetica',0,17,1,1,0,1,9,15,211,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "C"]). -text('black',504,208,'Helvetica',0,17,1,1,0,1,8,15,215,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "E"]). -text('black',528,272,'Helvetica',0,17,1,1,0,1,8,15,223,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "F"]). -text('black',464,304,'Helvetica',0,17,1,1,0,1,9,15,231,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "D"]). -text('black',664,192,'Helvetica',0,17,1,1,0,1,107,15,324,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). diff --git a/doc/html/TechNotes/pipe2.gif b/doc/html/TechNotes/pipe2.gif deleted file mode 100644 index 3a0c947..0000000 Binary files a/doc/html/TechNotes/pipe2.gif and /dev/null differ diff --git a/doc/html/TechNotes/pipe2.obj b/doc/html/TechNotes/pipe2.obj deleted file mode 100644 index 70d9c18..0000000 --- a/doc/html/TechNotes/pipe2.obj +++ /dev/null @@ -1,168 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,1,1,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 160,160,144,224,160,272,176,224,160,160],1,2,1,25,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 848,160,832,224,848,272,864,224,848,160],1,2,1,34,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -box('black',464,192,496,256,26,1,1,39,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 160,224,464,224],1,2,1,40,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 496,224,848,224],1,2,1,41,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 192,224,176,288,192,336,208,288,192,224],1,2,1,42,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 432,224,416,288,432,336,448,288,432,224],1,2,1,43,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 192,288,432,288],1,2,1,44,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). 
-box('black',464,352,496,416,26,1,1,45,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 528,224,512,288,528,336,544,288,528,224],1,2,1,46,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 816,224,800,288,816,336,832,288,816,224],1,2,1,47,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 528,288,816,288],1,2,1,48,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 848,240,848,352,832,384,800,384,496,384],1,2,1,55,1,0,0,0,10,4,0,0,0,'2','10','4', - "70",[ -]). -poly('black',5,[ - 528,384,512,448,528,496,544,448,528,384],1,2,1,57,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 800,384,784,448,800,496,816,448,800,384],1,2,1,58,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 800,448,528,448],1,2,1,61,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 464,256,456,304,464,328,488,304,488,256],1,2,1,62,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 480,352,488,304],0,2,1,85,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ -]). -box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application"]). -text('black',480,144,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5D_read()"]). -text('black',480,128,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Dread()"]). -text('black',304,208,'Helvetica',0,17,1,1,0,1,86,15,115,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',304,192,'Helvetica',0,17,1,1,0,1,99,15,119,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_fgath()"]). -text('black',296,288,'Helvetica',0,17,1,1,0,1,101,15,125,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',296,304,'Helvetica',0,17,1,1,0,1,90,15,132,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',296,320,'Helvetica',0,17,1,1,0,1,98,15,136,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',296,336,'Helvetica',0,17,1,1,0,1,33,15,140,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',664,208,'Helvetica',0,17,1,1,0,1,106,15,146,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). -text('black',664,176,'Helvetica',0,17,1,1,0,1,104,15,150,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_mscat()"]). -text('black',664,272,'Helvetica',0,17,1,1,0,1,54,15,154,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',672,368,'Helvetica',0,17,1,1,0,1,106,15,158,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). -text('black',672,336,'Helvetica',0,17,1,1,0,1,105,15,162,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_mgath()"]). -text('black',672,432,'Helvetica',0,17,1,1,0,1,54,15,166,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',384,392,'Helvetica',0,17,1,1,0,1,105,15,170,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5T_conv_struct()"]). -poly('black',4,[ - 392,384,400,352,440,368,456,336],1,1,1,172,1,0,0,0,8,3,0,0,0,'1','8','3', - "6",[ -]). -text('black',480,176,'Helvetica',0,17,1,1,0,1,44,15,176,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "TCONV"]). -text('black',480,416,'Helvetica',0,17,1,1,0,1,25,15,182,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "BKG"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). 
-poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,404,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 2: Partially Initialized Destination"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',160,208,'Helvetica',0,17,1,1,0,1,8,15,207,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). -text('black',192,272,'Helvetica',0,17,1,1,0,1,9,15,211,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "C"]). -text('black',504,208,'Helvetica',0,17,1,1,0,1,8,15,215,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "E"]). -text('black',528,272,'Helvetica',0,17,1,1,0,1,8,15,223,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "F"]). -text('black',856,288,'Helvetica',0,17,1,1,0,1,9,15,225,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "G"]). -text('black',800,432,'Helvetica',0,17,1,1,0,1,9,15,229,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H"]). -text('black',464,304,'Helvetica',0,17,1,1,0,1,9,15,231,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "D"]). -poly('black',4,[ - 848,240,848,224,864,224,904,224],0,2,1,318,1,0,0,0,10,4,0,0,0,'2','10','4', - "6",[ -]). -text('black',664,192,'Helvetica',0,17,1,1,0,1,107,15,326,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). -text('black',672,352,'Helvetica',0,17,1,1,0,1,107,15,334,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). diff --git a/doc/html/TechNotes/pipe3.gif b/doc/html/TechNotes/pipe3.gif deleted file mode 100644 index 26d82ad..0000000 Binary files a/doc/html/TechNotes/pipe3.gif and /dev/null differ diff --git a/doc/html/TechNotes/pipe3.obj b/doc/html/TechNotes/pipe3.obj deleted file mode 100644 index cdfef7c..0000000 --- a/doc/html/TechNotes/pipe3.obj +++ /dev/null @@ -1,70 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,0,0,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ -]). -box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application"]). -text('black',480,104,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5D_read()"]). -text('black',480,88,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Dread()"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,295,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 3: No Type Conversion"]). 
-text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -poly('black',5,[ - 152,160,136,224,152,272,168,224,152,160],1,2,1,273,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',480,120,'Helvetica',0,17,1,1,0,1,96,15,277,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_read()"]). -text('black',480,136,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -poly('black',5,[ - 880,160,864,224,880,272,896,224,880,160],1,2,1,283,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 152,224,880,224],1,2,1,286,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -text('black',480,232,'Helvetica',0,17,1,1,0,1,101,15,291,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',480,248,'Helvetica',0,17,1,1,0,1,90,15,293,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',480,264,'Helvetica',0,17,1,1,0,1,98,15,309,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',480,280,'Helvetica',0,17,1,1,0,1,33,15,311,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',176,208,'Helvetica',0,17,1,1,0,1,8,15,418,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). diff --git a/doc/html/TechNotes/pipe4.gif b/doc/html/TechNotes/pipe4.gif deleted file mode 100644 index a3a857b..0000000 Binary files a/doc/html/TechNotes/pipe4.gif and /dev/null differ diff --git a/doc/html/TechNotes/pipe4.obj b/doc/html/TechNotes/pipe4.obj deleted file mode 100644 index 6f50123..0000000 --- a/doc/html/TechNotes/pipe4.obj +++ /dev/null @@ -1,92 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,1,2,1,0,1,0,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,96,944,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',928,72,'Helvetica',0,17,1,1,0,1,32,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Buffer"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,372,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 4: Regularly Chunked Storage"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',480,104,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',480,120,'Helvetica',0,17,1,1,0,1,102,15,349,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_read()"]). -text('black',480,136,'Helvetica',0,17,1,1,0,1,167,15,351,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_copy_hyperslab()"]). -poly('black',5,[ - 160,160,144,224,160,272,176,224,160,160],1,2,1,362,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 880,160,864,224,880,272,896,224,880,160],1,2,1,363,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -box('black',448,192,512,256,26,1,1,364,0,0,0,0,0,'1',[ -]). 
-text('black',480,176,'Helvetica',0,17,1,1,0,1,43,15,367,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "CHUNK"]). -poly('black',2,[ - 160,224,448,224],1,2,1,372,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 512,224,880,224],1,2,1,373,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -text('black',288,224,'Helvetica',0,17,1,1,0,1,101,15,385,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',288,240,'Helvetica',0,17,1,1,0,1,90,15,387,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',288,256,'Helvetica',0,17,1,1,0,1,98,15,391,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',288,272,'Helvetica',0,17,1,1,0,1,33,15,395,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -poly('black',5,[ - 456,256,448,296,480,320,512,296,504,256],1,2,1,401,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',184,208,'Helvetica',0,17,1,1,0,1,8,15,422,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). -text('black',520,208,'Helvetica',0,17,1,1,0,1,9,15,434,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "D"]). -text('black',440,272,'Helvetica',0,17,1,1,0,1,9,15,440,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "C"]). -text('black',480,320,'Helvetica',0,17,1,1,0,1,107,15,444,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Z_uncompress()"]). -text('black',672,224,'Helvetica',0,17,1,1,0,1,107,15,454,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). -text('black',672,240,'Helvetica',0,17,1,1,0,1,106,15,464,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). -text('black',672,256,'Helvetica',0,17,1,1,0,1,54,15,466,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',168,488,'Helvetica',0,17,1,0,0,1,282,15,471,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "NOTE: H5Z_uncompress() is not implemented yet."]). diff --git a/doc/html/TechNotes/pipe5.gif b/doc/html/TechNotes/pipe5.gif deleted file mode 100644 index 6ae0098..0000000 Binary files a/doc/html/TechNotes/pipe5.gif and /dev/null differ diff --git a/doc/html/TechNotes/pipe5.obj b/doc/html/TechNotes/pipe5.obj deleted file mode 100644 index 4738bbd..0000000 --- a/doc/html/TechNotes/pipe5.obj +++ /dev/null @@ -1,52 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,1,2,1,0,1,0,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,96,944,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',928,72,'Helvetica',0,17,1,1,0,1,32,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Buffer"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,333,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 5: Reading a Single Chunk"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',480,112,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',480,128,'Helvetica',0,17,1,1,0,1,102,15,349,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_read()"]). -text('black',480,144,'Helvetica',0,17,1,1,0,1,167,15,351,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_copy_hyperslab()"]). 
-text('black',480,160,'Helvetica',0,17,1,1,0,1,101,15,385,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',480,176,'Helvetica',0,17,1,1,0,1,90,15,387,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',480,192,'Helvetica',0,17,1,1,0,1,98,15,391,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',480,208,'Helvetica',0,17,1,1,0,1,33,15,395,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',864,240,'Helvetica',0,17,1,1,0,1,107,15,444,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Z_uncompress()"]). -text('black',56,488,'Helvetica',0,17,1,0,0,1,282,15,471,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "NOTE: H5Z_uncompress() is not implemented yet."]). -poly('black',5,[ - 912,176,864,176,840,208,872,232,912,216],1,2,1,490,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',896,184,'Helvetica',0,17,1,0,0,1,8,15,491,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). diff --git a/doc/html/TechNotes/shuffling-algorithm-report.pdf b/doc/html/TechNotes/shuffling-algorithm-report.pdf deleted file mode 100755 index 459653c..0000000 Binary files a/doc/html/TechNotes/shuffling-algorithm-report.pdf and /dev/null differ diff --git a/doc/html/TechNotes/version.gif b/doc/html/TechNotes/version.gif deleted file mode 100644 index 41d4401..0000000 Binary files a/doc/html/TechNotes/version.gif and /dev/null differ diff --git a/doc/html/TechNotes/version.obj b/doc/html/TechNotes/version.obj deleted file mode 100644 index 96b5b7f..0000000 --- a/doc/html/TechNotes/version.obj +++ /dev/null @@ -1,96 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,8,1,9,1,1,0,2,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -poly('black',2,[ - 128,128,128,448],0,3,1,0,0,0,0,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 128,128,128,64],0,3,1,1,0,0,2,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 128,448,128,512],0,3,1,4,0,0,2,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -text('black',144,112,'Courier',0,17,1,0,0,1,42,14,22,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.30"]). -text('black',144,144,'Courier',0,17,1,0,0,1,42,14,30,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.31"]). -text('black',144,176,'Courier',0,17,1,0,0,1,42,14,32,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.32"]). -poly('black',2,[ - 256,208,256,448],0,3,1,34,0,0,0,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 256,448,256,512],0,3,1,36,0,0,2,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 128,192,256,208],1,1,1,37,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',144,224,'Courier',0,17,1,0,0,1,42,14,41,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.33"]). -text('black',144,256,'Courier',0,17,1,0,0,1,42,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.34"]). -text('black',272,224,'Courier',0,17,1,0,0,1,35,14,45,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.0"]). -text('black',272,256,'Courier',0,17,1,0,0,1,35,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.1"]). -text('black',272,288,'Courier',0,17,1,0,0,1,35,14,49,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.2"]). -text('black',272,320,'Courier',0,17,1,0,0,1,35,14,51,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.3"]). -text('black',144,288,'Courier',0,17,1,0,0,1,42,14,53,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.35"]). -text('black',144,320,'Courier',0,17,1,0,0,1,35,14,57,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.4.0"]). 
-text('black',144,368,'Courier',0,17,1,0,0,1,35,14,59,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.4.1"]). -text('black',272,192,'Helvetica',0,17,1,0,0,1,144,15,67,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "New development branch"]). -text('black',144,64,'Helvetica',0,17,1,0,0,1,163,15,69,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Original development branch"]). -text('black',16,208,'Helvetica',0,17,2,0,0,1,87,30,71,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Feature Freeze", - "at this point."]). -text('black',16,320,'Helvetica',0,17,2,0,0,1,84,30,73,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Public Release", - "at this point."]). -poly('black',2,[ - 104,208,128,208],1,1,1,77,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 104,320,128,320],1,1,1,78,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 256,336,128,352],1,1,1,79,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',320,368,'Helvetica',0,17,3,0,0,1,137,45,82,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Merge a bug fix from the", - "development branch to", - "the release branch."]). -box('black',312,368,464,416,0,1,1,87,0,0,0,0,0,'1',[ -]). -poly('black',4,[ - 312,392,240,384,296,344,232,344],1,1,1,90,1,0,0,0,8,3,0,0,0,'1','8','3', - "6",[ -]). -box('black',8,208,104,240,0,1,1,95,0,0,0,0,0,'1',[ -]). -box('black',8,320,104,352,0,1,1,98,0,0,0,0,0,'1',[ -]). -text('black',144,408,'Courier',0,17,1,0,0,1,35,14,102,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.4.2"]). -box('black',0,40,480,528,0,1,1,104,0,0,0,0,0,'1',[ -]). diff --git a/doc/html/Tools.html b/doc/html/Tools.html deleted file mode 100644 index 21f967a..0000000 --- a/doc/html/Tools.html +++ /dev/null @@ -1,2760 +0,0 @@ - - -HDF5/Tools API Specification - - - - - - - - - - - - -
      -
      - - - -

      HDF5 Tools

      -
      - - - -

      HDF5 Tool Interfaces

      -

      - -HDF5-related tools are available to assist the user in a variety of -activities, including - examining or managing HDF5 files, - converting raw data between HDF5 and other special-purpose formats, - moving data and files between the HDF4 and HDF5 formats, - measuring HDF5 library performance, and - managing HDF5 library and application compilation, - installation and configuration. -Unless otherwise specified below, these tools are distributed and -installed with HDF5. - - -

        -
      • User utilities: -
          -
        • h5dump -- - Enables a user to examine the contents of an HDF5 file - and dump those contents to an ASCII file -
        • h5ls -- - Lists specified features of HDF5 file contents -
        • h5diff -- - Compares two HDF5 files and reports the differences. -
        • h5repack -- Copies an HDF5 file to a new - file with or without compression/chunking. -
        • h5perf -- - Measures HDF5 performance -
        • h5repart -- - Repartitions a file, creating a family of files -

          -
        - -
      • Configuration and library management utilities: -
          -
        • h5redeploy -- - Updates HDF5 compiler tools after an HDF5 software installation - in a new location -
        • h5cc -- - Simplifies the compilation of HDF5 programs written in C -
        • h5fc -- - Simplifies the compilation of HDF5 programs written in Fortran90 -
        • h5c++ -- - Simplifies the compilation of HDF5 programs written in C++ -

          -
        - -
      • Java-based tools for HDF5 - for viewing, manipulating, and generating HDF4 and HDF5 files: -
        - (Distributed separately; external link is - http://hdf.ncsa.uiuc.edu/hdf-java-html/) -
          -
        • HDFview -- a browser that - works with both HDF4 and HDF5 files and - can be used to transfer data between the two formats -
        • Java interfaces for both the HDF4 and HDF5 libraries -
        • Other HDF4- and HDF5-related products -

          -
        - -
      • Data conversion utilities: -
          -
        • h5import -- - Imports data into an existing or new HDF5 file -
        • gif2h5 -- - Converts a GIF file to an HDF5 file -
        • h52gif -- - Converts images in an HDF5 file to a GIF file -

          -
        - -
      • - HDF5/HDF4 conversion tools: -
        - (Distributed separately; external link is - http://hdf.ncsa.uiuc.edu/h4toh5/) -
          -
        • H4toH5 Conversion Library -- - Provides APIs for use in tools that perform customized - conversions of HDF4 files to HDF5 files -
        • h5toh4 -- - Converts an HDF5 file to an HDF4 file -
        • h4toh5 -- - Converts an HDF4 file to an HDF5 file -

          -
        - -
      • Other tools, - including third-party and commercial utilities and applications -
        - (Distributed separately; external link is - http://hdf.ncsa.uiuc.edu/tools5.html) - -
      - - - - - -

      - -


      -
      -
      Tool Name: h5dump -
      Syntax: -
      h5dump - [OPTIONS] file -
      Purpose: -
      Displays HDF5 file contents. -
      Description: -
      h5dump enables the user to examine - the contents of an HDF5 file and dump those contents, in human - readable form, to an ASCII file. -

      - h5dump dumps HDF5 file content to standard output. - It can display the contents of the entire HDF5 file or - selected objects, which can be groups, datasets, a subset of a - dataset, links, attributes, or datatypes. -

      - The --header option displays object header - information only. -

- Names are the absolute names of the objects. h5dump - displays objects in the same order as they are specified on the command line. If a - name does not start with a slash, h5dump begins - searching for the specified object starting at the root group. -

      - If an object is hard linked with multiple names, - h5dump displays the content of the object in the - first occurrence. Only the link information is displayed in later - occurrences. -

      - h5dump assigns a name for any unnamed datatype in - the form of - #oid1:oid2, where - oid1 and oid2 are the object identifiers - assigned by the library. The unnamed types are displayed within - the root group. -

      - Datatypes are displayed with standard type names. For example, - if a dataset is created with H5T_NATIVE_INT type - and the standard type name for integer on that machine is - H5T_STD_I32BE, h5dump displays - H5T_STD_I32BE as the type of the dataset. -

      - h5dump can also dump a subset of a dataset. - This feature operates in much the same way as hyperslabs in HDF5; - the parameters specified on the command line are passed to the - function - H5Sselect_hyperslab and the resulting selection - is displayed. -
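- For example, assuming a dataset named /dset in a file named foo.h5 (both names are placeholders), a 2x3 element region starting at the origin could be displayed with -
          h5dump -d /dset -s "0,0" -S "1,1" -c "2,3" -k "1,1" foo.h5 -
 Here -s, -S, -c, and -k supply the start, stride, count, and block parameters that are passed to H5Sselect_hyperslab, as described in the option table below. -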

      - The h5dump output is described in detail in the - DDL for HDF5, the - Data Description Language document. -

      - Note: It is not permissible to specify multiple - attributes, datasets, datatypes, groups, or soft links with one - flag. For example, one may not issue the command -
      -          - WRONG:   - h5dump -a /attr1 /attr2 foo.h5 -
      - to display both /attr1 and /attr2. - One must issue the following command: -
      -          - CORRECT:   - h5dump -a /attr1 -a /attr2 foo.h5 -
      -

      - It's possible to select the file driver with which to open the - HDF5 file by using the --filedriver (-f) command-line option. - Acceptable values for the --filedriver option are: "sec2", - "family", "split", "multi", and "stream". If the file driver flag - isn't specified, then the file will be opened with each driver in - turn and in the order specified above until one driver succeeds - in opening the file. -

      -

- One byte integer type data is displayed in decimal by default. When - displayed in ASCII, a non-printable code is displayed in 3 octal - digits preceded by a backslash unless there is a C language escape - sequence for it. For example, CR and LF are printed as \r and \n. - Though the NUL code is represented as \0 in C, it is printed as - \000 to avoid ambiguity as illustrated in the following 1-byte - char data (since this is not a string, embedded NUL is possible). -

      -	141 142 143 000 060 061 062 012
      -	  a   b   c  \0   0   1   2  \n 
      - h5dump prints these values as "abc\000012\n". If h5dump printed NUL as \0, the output would be "abc\0012\n", which is ambiguous.

      - - -
      XML Output: -
      With the --xml option, h5dump generates - XML output. This output contains a complete description of the file, - marked up in XML. The XML conforms to the HDF5 Document Type - Definition (DTD) available at - - http://hdf.ncsa.uiuc.edu/DTDs/HDF5-File.dtd. -

      - The XML output is suitable for use with other tools, including the - HDF5 Java Tools. - -

      Options and Parameters: -
        -h  or  --help            Print a usage message and exit.
        -B  or  --bootblock       Print the content of the boot block. (This option is not yet implemented.)
        -H  or  --header          Print the header only; no data is displayed.
        -A                        Print the header and value of attributes; data of datasets is not displayed.
        -i  or  --object-ids      Print the object ids.
        -r  or  --string          Print 1-byte integer datasets as ASCII.
        -V  or  --version         Print version number and exit.
        -a P  or  --attribute=P   Print the specified attribute.
        -d P  or  --dataset=P     Print the specified dataset.
        -f D  or  --filedriver=D  Specify which driver to open the file with.
        -g P  or  --group=P       Print the specified group and all members.
        -l P  or  --soft-link=P   Print the value(s) of the specified soft link.
        -o F  or  --output=F      Output raw data into file F.
        -t T  or  --datatype=T    Print the specified named datatype.
        -w N  or  --width=N       Set the number of columns of output.
        -x  or  --xml             Output XML using XML schema (default) instead of DDL.
        -u  or  --use-dtd         Output XML using XML DTD instead of DDL.
        -D U  or  --xml-dtd=U     In XML output, refer to the DTD or schema at U instead of the default schema/DTD.
        -X S  or  --xml-ns=S      In XML output (XML Schema), use qualified names in the XML; ":" means no namespace; the default is "hdf5:".
        -s L  or  --start=L       Offset of start of subsetting selection. (Default: the beginning of the dataset)
        -S L  or  --stride=L      Hyperslab stride. (Default: 1 in all dimensions)
        -c L  or  --count=L       Number of blocks to include in the selection.
        -k L  or  --block=L       Size of block in hyperslab. (Default: 1 in all dimensions)
        --                        Indicate that all following arguments are non-options. E.g., to dump a file called `-f', use h5dump -- -f.
        file                      The file to be examined.
        -
      -

      - -
        The option parameters listed above are defined as follows:
        D    which file driver to use in opening the - file. Acceptable values are "sec2", "family", "split", - "multi", and "stream". Without the file driver flag the - file will be opened with each driver in turn and in the - order specified above until one driver succeeds in - opening the file.
        PThe full path from the root group to - the object
        TThe name of the datatype
        FA filename
        NAn integer greater than 1
        LA list of integers, the number of which is - equal to the number of dimensions in the dataspace being - queried
        UA URI (as defined in - [IETF RFC 2396], - updated by - [IETF RFC 2732]) - that refers to the DTD to be used to validate the XML
        - -

        Subsetting parameters can also be expressed in a convenient - compact form, as follows: -
        -          - --dataset="/foo/mydataset[START;STRIDE;COUNT;BLOCK]" -
        - All of the semicolons (;) are required, even when - a parameter value is not specified. - When not specified, default parameter values are used. -

      -
      Examples:

      1. Dumping the group /GroupFoo/GroupBar in the file quux.h5:
           h5dump -g /GroupFoo/GroupBar quux.h5

      2. Dumping the dataset Fnord in the group /GroupFoo/GroupBar in the file quux.h5:
           h5dump -d /GroupFoo/GroupBar/Fnord quux.h5

      3. Dumping the attribute metadata of the dataset Fnord which is in group /GroupFoo/GroupBar in the file quux.h5:
           h5dump -a /GroupFoo/GroupBar/Fnord/metadata quux.h5

      4. Dumping the attribute metadata which is an attribute of the root group in the file quux.h5:
           h5dump -a /metadata quux.h5

      5. Producing an XML listing of the file bobo.h5:
           h5dump --xml bobo.h5 > bobo.h5.xml

      6. Dumping a subset of the dataset /GroupFoo/databar/ in the file quux.h5:
           h5dump -d /GroupFoo/databar --start="1,1" --stride="2,3" --count="3,19" --block="1,1" quux.h5

      7. The same example using the short form to specify the subsetting parameters:
           h5dump -d "/GroupFoo/databar[1,1;2,3;3,19;1,1]" quux.h5
      - -
      Current Status: -
      The current version of h5dump displays the - following information: -
        -
      • Group -
          -
        • group attribute (see Attribute) -
        • group member -
        -
      • Dataset -
          -
        • dataset attribute (see Attribute) -
        • dataset type (see Datatype) -
        • dataset space (see Dataspace) -
        • dataset data -
        -
      • Attribute -
          -
        • attribute type (see Datatype) -
        • attribute space (see Dataspace) -
        • attribute data -
        -
      • Datatype -
          -
        • integer type -
          - - H5T_STD_I8BE, H5T_STD_I8LE, H5T_STD_I16BE, ... -
        • floating point type -
          - - H5T_IEEE_F32BE, H5T_IEEE_F32LE, H5T_IEEE_F64BE, ... -
        • string type -
        • compound type -
          - - named, unnamed and transient compound type -
          - - integer, floating or string type member -
        • opaque types -
        • reference type -
          - - object references -
          - - data regions -
        • enum type -
        • variable-length datatypes -
          - - atomic types only -
          - - scalar or single dimensional array of variable-length - types supported -
        -
      • Dataspace -
          -
        • scalar and simple space -
        -
      • Soft link -
      • Hard link -
      • Loop detection -
      - -
      -
      -
      Tool Name: h5ls -
      Syntax: -
      h5ls - [OPTIONS] - file - [OBJECTS...] -
      Purpose: -
      Prints information about a file or dataset. -
      Description: -
      h5ls prints selected information about file objects - in the specified format. -
      Options and Parameters: -
        -h  or  -?  or  --help    Print a usage message and exit.
        -a  or  --address         Print addresses for raw data.
        -d  or  --data            Print the values of datasets.
        -e  or  --errors          Show all HDF5 error reporting.
        -f  or  --full            Print full path names instead of base names.
        -g  or  --group           Show information about a group, not its contents.
        -l  or  --label           Label members of compound datasets.
        -r  or  --recursive       List all groups recursively, avoiding cycles.
        -s  or  --string          Print 1-byte integer datasets as ASCII.
        -S  or  --simple          Use a machine-readable output format.
        -w N  or  --width=N       Set the number of columns of output.
        -v  or  --verbose         Generate more verbose output.
        -V  or  --version         Print version number and exit.
        -x  or  --hexdump         Show raw data in hexadecimal format.
        file                      The file name may include a printf(3C) integer format such as %05d to open a file family.
        objects                   Each object consists of an HDF5 file name optionally followed by a slash and an object name within the file (if no object is specified within the file then the contents of the root group are displayed). The file name may include a printf(3C) integer format such as "%05d" to open a file family.
      - -
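      Example:
      An illustrative invocation (the file name quux.h5 and the group /GroupFoo are hypothetical) that recursively lists the group's contents and prints full path names:
           h5ls -r -f quux.h5/GroupFoo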
      - - - -
      -
      -
      Tool Name: h5diff     -
      Syntax: -
      h5diff file1 file2 - [OPTIONS] - [object1 [object2 ] ] -
      Purpose: -
      Compares two HDF5 files and reports the differences. -
      Description: -
      h5diff is a command line tool that compares - two HDF5 files, file1 and file2, and - reports the differences between them.  -

      - Optionally, h5diff will compare two objects within these files. If only one object, object1, is specified, h5diff will compare object1 in file1 with object1 in file2. If two objects, object1 and object2, are specified, h5diff will compare object1 in file1 with object2 in file2. These objects must be HDF5 datasets.

      -

      - object1 and object2 must be expressed - as absolute paths from the respective file's root group. -

      -

      - h5diff has the following four modes of output:
      - Normal mode: print the number of differences found and where they occurred
      - Report mode (-r): print the above plus the differences
      - Verbose mode (-v): print the above plus a list of objects and warnings
      - Quiet mode (-q): do not print output (h5diff always returns an exit code of - 1 when differences are found). -

      -

      - Additional information, with several sample cases, - can be found in the document - - H5diff Examples. -

      Options and Parameters: -
        file1, file2        The HDF5 files to be compared.
        -h                  Print a help message.
        -r                  Report mode. Print the differences.
        -v                  Verbose mode. Print the differences, list of objects, and warnings.
        -q                  Quiet mode. Do not print output.
        -n count            Print differences up to count differences, then stop. count must be a positive integer.
        -d delta            Print only differences that are greater than the limit delta. delta must be a positive number. The comparison criterion is whether the absolute value of the difference of two corresponding values is greater than delta (e.g., |a–b| > delta, where a is a value in file1 and b is a value in file2).
        -p relative         Print only differences that are greater than a relative error. relative must be a positive number. The comparison criterion is whether the absolute value of the difference between 1 and the ratio of two corresponding values is greater than relative (e.g., |1–(b/a)| > relative, where a is a value in file1 and b is a value in file2).
        object1, object2    Specific object(s) within the files to be compared.
      -
      Examples: -
      The following h5diff call compares - the object /a/b in file1 - with the object /a/c in file2:
      -     h5diff file1 file2 /a/b /a/c -
      This h5diff call compares - the object /a/b in file1 - with the same object in file2:
      -     h5diff file1 file2 /a/b -
      And this h5diff call compares - all objects in both files:
      -     h5diff file1 file2 - -
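      A further illustrative call (the file and dataset names are hypothetical) that reports the individual differences found in /dset, ignoring any difference whose absolute value is 0.001 or less:
           h5diff -r -d 0.001 file1 file2 /dset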
      - - - -
      -
      -
      Tool Name: h5repack     -
      Syntax: -
      h5repack -i file1 -o file2 [-h] [-v] [-f 'filter'] [-l 'layout'] [-m number] [-e file]
      Purpose: -
      Copies an HDF5 file to a new file with or without compression/chunking. -
      Description: -
      h5repack is a command line tool that applies HDF5 filters to an input file file1, saving the output in a new file, file2.

      'filter' - is a string with the format 
      - <list of objects> : <name of filter> = <filter - parameters>.
      -
      -  <list of objects> is a comma separated list of object names - meaning apply compression only to those objects. If no object names are - specified, the filter is applied to all objects
      -  <name of filter> can be: 
      - GZIP, to apply the HDF5 GZIP filter (GZIP compression)
      - SZIP, to apply the HDF5 SZIP filter (SZIP compression)
      - SHUF, to apply the HDF5 shuffle filter
      - FLET, to apply the HDF5 checksum filter
      - NONE, to remove the filter 
      - <filter parameters> is optional compression info 
      - SHUF (no parameter) 
      - FLET (no parameter) 
      - GZIP=<deflation level> from 1-9 
      - SZIP=<pixels per block,coding> (pixels per block is an even number in 2-32 and coding method is 'EC' or 'NN')

       
      -
      'layout' is a string with the format
      -  <list of objects> : <layout type> 
      -
      - <list of objects> is a comma separated list of object names, meaning - that layout information is supplied for those objects. If no object names are - specified, the layout is applied to all objects 
      - <layout type> can be: 
      - CHUNK, to apply chunking layout 
      - COMPA, to apply compact layout 
      - CONTI, to apply contiguous layout 
      - <layout parameters> is present only for the chunk case; it is the chunk size of each dimension: <dim_1 x dim_2 x ... dim_n>
      -
       
      -
      Options and Parameters:
        file1, file2    The input and output HDF5 files.
        -h              Print a help message.
        -f filter       Filter type.
        -l layout       Layout type.
        -v              Verbose mode. Print output (list of objects in the file, filters and layout applied).
        -e file         File containing the -f and -l options (filter and layout flags only).
        -m number       Do not apply the filter to objects whose size in bytes is smaller than number. If no size is specified, a minimum of 1024 bytes is assumed.
      Examples: -
      1) h5repack -i file1 -o file2 -f GZIP=1 -v
      -         Applies GZIP compression to all - objects in file1 and saves the output in file2 -

      2) h5repack -i file1 -o file2 -f dset1:SZIP=8,NN -v
      -         Applies SZIP compression only - to object 'dset1'

      -

      3) h5repack -i file1 -o file2 -l dset1,dset2:CHUNK=20x10 -v
      -         Applies chunked layout to - objects 'dset1' and 'dset2'
      -

      - -

       

      -
      -
      -
      Tool Name: h5repart -
      Syntax: -
      h5repart - [-v] - [-V] - [-[b|m]N[g|m|k]] - [-family_to_sec2] - source_file - dest_file -
      Purpose: -
      Repartitions a file or family of files. -
      Description: -
      h5repart joins a family of files into a single file, or copies one family of files to another while changing the size of the family members. h5repart can also be used to copy a single file to a single file with holes. At this stage, h5repart cannot split a single non-family file into a family of files.

      - To convert a family of files to a single non-family (sec2) file, the -family_to_sec2 option must be used.

      - Sizes associated with the -b and -m - options may be suffixed with g for gigabytes, - m for megabytes, or k for kilobytes. -

      - File family names include an integer printf - format such as %d. - -
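      For illustration (the file names are hypothetical), the first call below copies the family bigfile%d.h5 to a new family newfile%d.h5 with 1-gigabyte members, and the second joins the family into the single (sec2) file whole.h5:
           h5repart -m1g bigfile%d.h5 newfile%d.h5
           h5repart -family_to_sec2 bigfile%d.h5 whole.h5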

      Options and Parameters: -
        -v                 Produce verbose output.
        -V                 Print a version number and exit.
        -bN                The I/O block size. (Default: 1KB)
        -mN                The destination member size. (Default: 1GB)
        -family_to_sec2    Convert the file driver from family to sec2.
        source_file        The name of the source file.
        dest_file          The name of the destination file(s).
      - -
      - - - -
      -
      -
      Tool Name: h5import -
      Syntax: -
      h5import - infile in_options - [infile in_options ...] - -o outfile - -
      h5import - infile in_options - [infile in_options ...] - -outfile outfile - -
      h5import -h -
      h5import -help -
      Purpose: -
      Imports data into an existing or new HDF5 file. -
      Description: -
      h5import converts data - from one or more ASCII or binary files, infile, - into the same number of HDF5 datasets - in the existing or new HDF5 file, outfile. - Data conversion is performed in accordance with the - user-specified type and storage properties - specified in in_options. -

      - The primary objective of h5import is to - import floating point or integer data. - The utility's design allows for future versions that - accept ASCII text files and store the contents as a - compact array of one-dimensional strings, - but that capability is not implemented in HDF5 Release 1.6. - -

      - Input data and options:
      - Input data can be provided in one of the following forms: -

      • As an ASCII, or plain-text, file containing either - floating point or integer data -
      • As a binary file containing either 32-bit or - 64-bit native floating point data -
      • As a binary file containing native integer data, - signed or unsigned and - 8-bit, 16-bit, 32-bit, or 64-bit. -
      • As an ASCII, or plain-text, file containing text data. - (This feature is not implemented in HDF5 Release 1.6.) -
      - Each input file, infile, - contains a single n-dimensional - array of values of one of the above types expressed - in the order of fastest-changing dimensions first. -

      - Floating point data in an ASCII input file must be expressed in fixed-point notation (e.g., 323.56). h5import is designed to accept scientific notation (e.g., 3.23E+02) in ASCII input, but that capability is not implemented in HDF5 Release 1.6.

      - Each input file can be associated with options specifying - the datatype and storage properties. - These options can be specified either as - command line arguments - or in a configuration file. - Note that exactly one of these approaches must be used with a - single input file. -

      - Command line arguments, best used with simple input files, - can be used to specify - the class, size, dimensions of the input data and - a path identifying the output dataset. -

      - The recommended means of specifying input data options - is in a configuration file; this is also the only means of - specifying advanced storage features. - See further discussion in "The configuration file" below. -

      - The only required option for input data is dimension sizes; - defaults are available for all others. -

      - h5import will accept up to 30 input files in a single call. - Other considerations, such as the maximum length of a command line, - may impose a more stringent limitation. - -

      - Output data and options:
      - The name of the output file is specified following - the -o or -output option - in outfile. - The data from each input file is stored as a separate dataset - in this output file. - outfile may be an existing file. - If it does not yet exist, h5import will create it. -

      - Output dataset information and storage properties can be specified only by means of a configuration file.
        - Dataset path - If the groups in the path leading to the dataset - do not exist, h5import will create them.
      - If no group is specified, the dataset will be created - as a member of the root group.
      - If no dataset name is specified, the default name is - dataset1 for the first input dataset, - dataset2 for the second input dataset, - dataset3 for the third input dataset, - etc.
      - h5import does not overwrite a pre-existing - dataset of the specified or default name. - When an existing dataset of a conflicting name is - encountered, h5import quits with an error; - the current input file and any subsequent input files - are not processed. -
        - Output type - Datatype parameters for output data -
        -     Output data class - Signed or unsigned integer or floating point -
        -     Output data size - 8-, 16-, 32-, or 64-bit integer
      - 32- or 64-bit floating point -
        -     Output architecture - IEEE
      - STD
      - NATIVE (Default)
      - Other architectures are included in the h5import design - but are not implemented in this release. -
        -     Output byte order - Little- or big-endian.
      - Relevant only if output architecture - is IEEE, UNIX, or STD; - fixed for other architectures. -
        - Dataset layout and storage  
      -         properties -
      Denotes how raw data is to be organized on the disk. If none of the following are specified, the default configuration is contiguous layout with no compression.
        -     Layout - Contiguous (Default)
      - Chunked -
        -     External storage - Allows raw data to be stored in a non-HDF5 file or in an - external HDF5 file.
      - Requires contiguous layout. -
        -     Compressed - Sets the type of compression and the - level to which the dataset must be compressed.
      - Requires chunked layout. -
        -     Extendable - Allows the dimensions of the dataset to increase over time and/or to be unlimited.
      - Requires chunked layout. -
        -     Compressed and
      -         extendable -
      Requires chunked layout. -
        - -   -
      -

      - -

      - Command-line arguments:
      - The h5import syntax for the command-line arguments, - in_options, is as follows: - - -
           - h5import infile -d dim_list - [-p pathname] - [-t input_class] - [-s input_size] - [infile ...] - -o outfile
      - or
      - h5import infile -dims dim_list - [-path pathname] - [-type input_class] - [-size input_size] - [infile ...] - -outfile outfile
      - or
      - h5import infile -c config_file - [infile ...] - -outfile outfile -
      - Note the following: - If the -c config_file option is used with - an input file, no other argument can be used with that input file. - If the -c config_file option is not used with - an input data file, the -d dim_list argument - (or -dims dim_list) - must be used and any combination of the remaining options may be used. - Any arguments used must appear in exactly the order used - in the syntax declarations immediately above. - -

      - The configuration file:
      - A configuration file is specified with the - -c config_file option: - - -
           - h5import infile -c config_file - [infile -c config_file2 ...] - -outfile outfile -
      -

      - The configuration file is an ASCII file and must be - organized as "Configuration_Keyword Value" pairs, - with one pair on each line. - For example, the line indicating that - the input data class (configuration keyword INPUT-CLASS) - is floating point in a text file (value TEXTFP) - would appear as follows:
      -     INPUT-CLASS TEXTFP -

      - A configuration file may have the following keywords each - followed by one of the following defined values. - One entry for each of the first two keywords, - RANK and DIMENSION-SIZES, - is required; all other keywords are optional. - -

      Keyword
          Value
      Description
      RANK   -

      The number of dimensions in the dataset. (Required) -
      -     rank - An integer specifying the number of dimensions in the dataset.
      - Example:   4   for a 4-dimensional dataset. -
      -
      DIMENSION-SIZES -

      Sizes of the dataset dimensions. (Required) -
      -     dim_sizes - A string of space-separated integers - specifying the sizes of the dimensions in the dataset. - The number of sizes in this entry must match the value in - the RANK entry. - The fastest-changing dimension must be listed first.
      - Example:   4 3 4 38   for a 38x4x3x4 dataset. -
      -
      PATH -

      Path of the output dataset. -
      -     path - The full HDF5 pathname identifying the output dataset - relative to the root group within the output file.
      - I.e., path is a string consisting of optional group names, each followed by a slash, and ending with a dataset name. If the groups in the path do not exist, they will be created.
      - If PATH is not specified, the output dataset - is stored as a member of the root group and the - default dataset name is - dataset1 for the first input dataset, - dataset2 for the second input dataset, - dataset3 for the third input dataset, etc.
      - Note that h5import does not overwrite a - pre-existing dataset of the specified or default name. - When an existing dataset of a conflicting name is - encountered, h5import quits with an error; - the current input file and any subsequent input files - are not processed.
      - Example: The configuration file entry - - -
           - PATH grp1/grp2/dataset1 -
      - indicates that the output dataset dataset1 will - be written in the group grp2/ which is in - the group grp1/, - a member of the root group in the output file. -
      -
      INPUT-CLASS   -

      A string denoting the type of input data. -
      -     TEXTIN - Input is signed integer data in an ASCII file. -
      -     TEXTUIN - Input is unsigned integer data in an ASCII file. -
      -     TEXTFP - Input is floating point data in fixed notation (e.g., 325.34) - in an ASCII file. -
      -     TEXTFPE - Input is floating point data in scientific notation (e.g., 3.2534E+02) - in an ASCII file.
      - (Not implemented in this release.) -
      -     IN - Input is signed integer data in a binary file. -
      -     UIN - Input is unsigned integer data in a binary file. -
      -     FP - Input is floating point data in a binary file. (Default) -
      -     STR - Input is character data in an ASCII file. - With this value, the configuration keywords - RANK, DIMENSION-SIZES, - OUTPUT-CLASS, OUTPUT-SIZE, - OUTPUT-ARCHITECTURE, and OUTPUT-BYTE-ORDER - will be ignored.
      - (Not implemented in this release.) -
      -
      INPUT-SIZE -

      An integer denoting the size of the input data, in bits. -
      -     8
      -     16
      -     32
      -     64 -
      For signed and unsigned integer data: - TEXTIN, TEXTUIN, - IN, or UIN. - (Default: 32) -
      -     32
      -     64 -
      For floating point data: - TEXTFP, TEXTFPE, - or FP. - (Default: 32) -
      -
      OUTPUT-CLASS   -

      A string denoting the type of output data. -
      -     IN - Output is signed integer data.
      - (Default if INPUT-CLASS is - IN or TEXTIN) -
      -     UIN - Output is unsigned integer data.
      - (Default if INPUT-CLASS is - UIN or TEXTUIN) -
      -     FP - Output is floating point data.
      - (Default if INPUT-CLASS is not specified or is - FP, TEXTFP, or TEXTFPE) -
      -     STR - Output is character data, - to be written as a 1-dimensional array of strings.
      - (Default if INPUT-CLASS is STR)
      - (Not implemented in this release.) -
      -
      OUTPUT-SIZE -

      An integer denoting the size of the output data, in bits. -
      -     8
      -     16
      -     32
      -     64 -
      For signed and unsigned integer data: - IN or UIN. - (Default: Same as INPUT-SIZE, else 32) -
      -     32
      -     64 -
      For floating point data: - FP. - (Default: Same as INPUT-SIZE, else 32) -
      -
      OUTPUT-ARCHITECTURE -

      A string denoting the type of output architecture. -
      -     NATIVE
      -     STD
      -     IEEE
      -     INTEL *
      -     CRAY *
      -     MIPS *
      -     ALPHA *
      -     UNIX * -
      See the "Predefined Atomic Types" section - in the "HDF5 Datatypes" chapter - of the HDF5 User's Guide - for a discussion of these architectures.
      - Values marked with an asterisk (*) are not implemented in this release.
      - (Default: NATIVE) -
      -
      OUTPUT-BYTE-ORDER -

      A string denoting the output byte order. - This entry is ignored if the OUTPUT-ARCHITECTURE - is not specified or if it is not specified as IEEE, - UNIX, or STD. -
      -     BE - Big-endian. (Default) -
      -     LE - Little-endian. -
      -
      The following options are disabled by default, making - the default storage properties no chunking, no compression, - no external storage, and no extensible dimensions. -
      -
      CHUNKED-DIMENSION-SIZES
      -

      Dimension sizes of the chunk for chunked output data. -
      -     chunk_dims - A string of space-separated integers specifying the - dimension sizes of the chunk for chunked output data. - The number of dimensions must correspond to the value - of RANK.
      - The presence of this field indicates that the - output dataset is to be stored in chunked layout; - if this configuration field is absent, - the dataset will be stored in contiguous layout. -
      -
      COMPRESSION-TYPE -

      Type of compression to be used with chunked storage. - Requires that CHUNKED-DIMENSION-SIZES - be specified. -
      -     GZIP - Gzip compression.
      - Other compression algorithms are not implemented - in this release of h5import. -
      -
      COMPRESSION-PARAM -

      Compression level. - Required if COMPRESSION-TYPE is specified. -
      -     1 through 9 - Gzip compression levels: - 1 will result in the fastest compression - while 9 will result in the - best compression ratio.
      - (Default: 6. The default gzip compression level is 6; - not all compression methods will have a default level.) -
      -
      EXTERNAL-STORAGE -

      Name of an external file in which to create the output dataset. Cannot be used with CHUNKED-DIMENSION-SIZES, COMPRESSION-TYPE, or MAXIMUM-DIMENSIONS.
      -     external_file        - - - - A string specifying the name of an external file. -
      -
      MAXIMUM-DIMENSIONS -

      Maximum sizes of all dimensions. - Requires that CHUNKED-DIMENSION-SIZES be specified. -
      -     max_dims - A string of space-separated integers specifying the - maximum size of each dimension of the output dataset. - A value of -1 for any dimension implies - unlimited size for that particular dimension.
      - The number of dimensions must correspond to the value - of RANK.
      -


      - - - -

      Options and Parameters: - -
        infile(s)Name of the Input file(s).
        in_options    Input options. Note that while only the -dims argument is required, arguments must be used in the order in which they are listed below.
          -d dim_list 
          -dims dim_listInput data dimensions. - dim_list is a string of - comma-separated numbers with no spaces - describing the dimensions of the input data. - For example, a 50 x 100 2-dimensional array would be - specified as -dims 50,100.
        - Required argument: if no configuration file is used, - this command-line argument is mandatory.
          -p pathname 
          -path pathname
        pathname is a string consisting of one or more strings separated by slashes (/) specifying the path of the dataset in the output file. If the groups in the path do not exist, they will be created.
        - Optional argument: if not specified, - the default path is - dataset1 for the first input dataset, - dataset2 for the second input dataset, - dataset3 for the third input dataset, - etc.
        - h5import does not overwrite a pre-existing - dataset of the specified or default name. - When an existing dataset of a conflicting name is - encountered, h5import quits with an error; - the current input file and any subsequent input files - are not processed.
          -t input_class 
          -type input_class  input_class specifies the class of the - input data and determines the class of the output data.
        - Valid values are as defined in the Keyword/Values table - in the section "The configuration file" above.
        - Optional argument: if not specified, - the default value is FP.
          -s input_size 
          -size input_sizeinput_size specifies the size in bits - of the input data and determines the size of the output data.
        - Valid values for signed or unsigned integers are - 8, 16, 32, and 64.
        - Valid values for floating point data are - 32 and 64.
        - Optional argument: if not specified, - the default value is 32.
          -c config_fileconfig_file specifies a - configuration file.
        - This argument replaces all other arguments except - infile and - -o outfile
          -h 
          -help - Prints the h5import usage summary:
        - h5import -h[elp], OR
        - h5import <infile> <options> [<infile> <options>...] -o[utfile] <outfile>

        - Then exits. -
        outfileName of the HDF5 output file.
      - - -
      Examples: -
      Using command-line arguments: - - -
      - h5import infile -dims 2,3,4 -type TEXTIN -size 32 -o out1 -
           - This command creates a file out1 containing - a single 2x3x4 32-bit integer dataset. - Since no pathname is specified, the dataset is stored - in out1 as /dataset1. -
      - h5import infile -dims 20,50 -path bin1/dset1 -type FP -size 64 -o out2 -
           - This command creates a file out2 containing a single 20x50 64-bit floating point dataset. The dataset is stored in out2 as /bin1/dset1.
      - -
      Sample configuration files:
      - The following configuration file specifies the following:
      - – The input data is a 5x2x4 floating point array in - an ASCII file.
      - – The output dataset will be saved in chunked layout, - with chunk dimension sizes of 2x2x2.
      - – The output datatype will be 64-bit floating point, - little-endian, IEEE.
      - – The output dataset will be stored in - outfile - at /work/h5/pkamat/First-set.
      - – The maximum dimension sizes of the output dataset - will be 8x8x(unlimited). -
      -            PATH work/h5/pkamat/First-set
      -            INPUT-CLASS TEXTFP
      -            RANK 3
      -            DIMENSION-SIZES 5 2 4
      -            OUTPUT-CLASS FP
      -            OUTPUT-SIZE 64
      -            OUTPUT-ARCHITECTURE IEEE
      -            OUTPUT-BYTE-ORDER LE
      -            CHUNKED-DIMENSION-SIZES 2 2 2 
      -            MAXIMUM-DIMENSIONS 8 8 -1
      -        
      - - The next configuration file specifies the following:
      - – The input data is a 6x3x5x2x4 integer array in - a binary file.
      - – The output dataset will be saved in chunked layout, - with chunk dimension sizes of 2x2x2x2x2.
      - – The output datatype will be 32-bit integer in - NATIVE format - (as the output architecture is not specified).
      - – The output dataset will be compressed using Gzip compression - with a compression level of 7.
      - – The output dataset will be stored in - outfile at /Second-set. -
      -            PATH Second-set
      -            INPUT-CLASS IN
      -            RANK 5
      -            DIMENSION-SIZES 6 3 5 2 4
      -            OUTPUT-CLASS IN
      -            OUTPUT-SIZE 32
      -            CHUNKED-DIMENSION-SIZES 2 2 2 2 2
      -            COMPRESSION-TYPE GZIP
      -            COMPRESSION-PARAM 7
      -        
      - - - -
      - - - -
      -
      -
      Tool Name: gif2h5 -
      Syntax: -
      gif2h5 - gif_file h5_file -
      Purpose: -
      Converts a GIF file to an HDF5 file. -
      Description: -
      gif2h5 accepts as input the GIF file gif_file - and produces the HDF5 file h5_file as output. - -
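      An illustrative conversion (the file names are hypothetical):
           gif2h5 sample.gif sample.h5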
      Options and Parameters: -
        gif_file    The name of the input GIF file.
        h5_file     The name of the output HDF5 file.
      - - - -
      -
      -
      Tool Name: h52gif -
      Syntax: -
      h52gif - h5_file gif_file - -i h5_image - [-p h5_palette] -
      Purpose: -
      Converts an HDF5 file to a GIF file. -
      Description: -
      h52gif accepts as input the HDF5 file h5_file - and the names of images and associated palettes within that file - as input and produces the GIF file gif_file, - containing those images, as output. -

      - h52gif expects at least - one h5_image. - You may repeat -
      -      - -i h5_image - [-p h5_palette] -
      - up to 50 times, for a maximum of 50 images. - -
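      An illustrative invocation (the file, image, and palette names are hypothetical) that writes two images, the first with an explicit palette:
           h52gif ex_image.h5 ex_image.gif -i image1 -p palette1 -i image2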

      Options and Parameters: -
        h5_file          The name of the input HDF5 file.
        gif_file         The name of the output GIF file.
        -i h5_image      Image option, specifying the name of an HDF5 image or dataset containing an image to be converted.
        -p h5_palette    Palette option, specifying the name of an HDF5 dataset containing a palette to be used in an image conversion.
      - - - -
      -
      -
      Tool Name: h5toh4 -
      Syntax: -
      h5toh4 -h
      - h5toh4 - h5file - h4file
      - h5toh4 - h5file
      - h5toh4 -m - h5file1 - h5file2 - h5file3 ... -
      Purpose: -
      Converts an HDF5 file into an HDF4 file. -
      Description: -
      h5toh4 is an HDF5 utility which reads - an HDF5 file, h5file, and converts all - supported objects and pathways to produce an HDF4 file, - h4file. If h4file already exists, - it will be replaced. -

      - If only one file name is given, the name must end in - .h5 and is assumed to represent the - HDF5 input file. h5toh4 replaces the - .h5 suffix with .hdf to form - the name of the resulting HDF4 file and proceeds as above. - If a file with the name of the intended HDF4 file already - exists, h5toh4 exits with an error without - changing the contents of any file. -

      - The -m option allows multiple HDF5 file - arguments. Each file name is treated the same as the - single file name case above. -

      - The -h option causes the following - syntax summary to be displayed: -

                    h5toh4 file.h5 file.hdf
      -              h5toh4 file.h5
      -              h5toh4 -m file1.h5 file2.h5 ...
      - -

      - - The following HDF5 objects occurring in an HDF5 file are - converted to HDF4 objects in the HDF4 file: - -

        -
      • HDF5 group objects are converted into HDF4 Vgroup - objects. HDF5 hard links and soft links pointing to - objects are converted to HDF4 Vgroup references. -
      • HDF5 dataset objects of integer datatype are - converted into HDF4 SDS objects. These datasets - may have up to 32 fixed dimensions. - The slowest varying dimension may be extendable. - 8-bit, 16-bit, and 32-bit integer datatypes are - supported. -
      • HDF5 dataset objects of floating point datatype - are converted into HDF4 SDS objects. - These datasets may have up to 32 fixed dimensions. - The slowest varying dimension may be extendable. - 32-bit and 64-bit floating point datatypes are - supported. -
      • HDF5 dataset objects of single dimension and - compound datatype are converted into HDF4 Vdata - objects. The length of that single dimension may - be fixed or extendable. The members of the - compound datatype are constrained to be no more - than rank 4. -
      • HDF5 dataset objects of single dimension and fixed length string - datatype are converted into HDF4 Vdata objects. The HDF4 Vdata - is a single field whose order is the length of the HDF5 string - type. The number of records of the Vdata is the length of the - single dimension which may be fixed or extendable. -
      - - Other objects are not converted and are not recorded - in the resulting h4file. -

      - Attributes associated with any of the supported HDF5 - objects are carried over to the HDF4 objects. - Attributes may be of integer, floating point, or fixed length - string datatype and they may have up to 32 fixed dimensions. -

      - All datatypes are converted to big-endian. - Floating point datatypes are converted to IEEE format. - -

      Note: -
      The h5toh4 and h4toh5 utilities - are no longer part of the HDF5 product; - they are distributed separately through the page - - Converting between HDF (4.x) and HDF5. -

      - -
      Options and Parameters: -
        -h        Displays a syntax summary.
        -m        Converts multiple HDF5 files to multiple HDF4 files.
        h5file    The HDF5 file to be converted.
        h4file    The HDF4 file to be created.
      - - - -
      -
      -
      Tool Name: h4toh5 -
      Syntax: -
      h4toh5 -h
      - h4toh5 - h4file - h5file
      - h4toh5 - h4file
      - -
      Purpose: -
      Converts an HDF4 file to an HDF5 file. -
      Description: -
      h4toh5 is a file conversion utility that reads - an HDF4 file, h4file (input.hdf for example), - and writes an HDF5 file, h5file (output.h5 - for example), containing the same data. -

      - If no output file h5file is specified, - h4toh5 uses the input filename to designate - the output file, replacing the extension .hdf - with .h5. - For example, if the input file scheme3.hdf is - specified with no output filename, h4toh5 will - name the output file scheme3.h5. -

      - - The -h option causes a syntax summary - similar to the following to be displayed: -

                    h4toh5 inputfile.hdf outputfile.h5
      -              h4toh5 inputfile.hdf                     
      -

      - Each object in the HDF4 file is converted to an equivalent - HDF5 object, according to the mapping described in - - Mapping HDF4 Objects to HDF5 Objects. - (If this mapping changes between HDF5 Library releases, a more up-to-date - version may be available at - - Mapping HDF4 Objects to HDF5 Objects on the HDF FTP server.) -

      - In this initial version, h4toh5 converts the following - HDF4 objects: -

      HDF4 Object                  Resulting HDF5 Object
      SDS                          Dataset
      GR, RI8, and RI24 image      Dataset
      Vdata                        Dataset
      Vgroup                       Group
      Annotation                   Attribute
      Palette                      Dataset
      -
      -
      Note: -
      The h4toh5 and h5toh4 utilities - are no longer part of the HDF5 product; - they are distributed separately through the page - - Converting between HDF (4.x) and HDF5. -

      -
      Options and Parameters: -
        -h        Displays a syntax summary.
        h4file    The HDF4 file to be converted.
        h5file    The HDF5 file to be created.
      - - - -
      -
      -
      Tool Name: h5perf -
      Syntax: -
      h5perf [-h | --help] -
      h5perf [options] - - -
      Purpose: -
      Tests Parallel HDF5 performance. -
      Description: -
      h5perf provides tools for testing the performance - of the Parallel HDF5 library. -

      - The following environment variables affect h5perf behavior:
           - HDF5_NOCLEANUP - If set, h5perf does not remove data files. - (Default: Remove)
        - HDF5_MPI_INFO - Must be set to a string containing a list of semi-colon separated - key=value pairs for the MPI INFO object.
      - Example:
        - HDF5_PARAPREFIX   - Sets the prefix for parallel output data files.
      - -
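      As a hedged illustration (the process count and the mpiexec launcher are assumptions; use whatever MPI launcher is appropriate for the system), the following runs the Parallel HDF5 and MPI-I/O tests with 1 megabyte per process per dataset:
           mpiexec -n 4 h5perf -A phdf5,mpiio -e 1M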

      Options and Parameters: - -
        These terms are used as follows in this section:
        file    A filename.
        size    A size specifier, expressed as an integer greater than or equal to 0 (zero) followed by a size indicator:
                    K for kilobytes (1024 bytes)
                    M for megabytes (1048576 bytes)
                    G for gigabytes (1073741824 bytes)
                Example: 37M specifies 37 megabytes or 38797312 bytes.
        N       An integer greater than or equal to 0 (zero).

        -h, --help
                Prints a usage message and exits.
        -a size, --align=size
                Specifies the alignment of objects in the HDF5 file. - (Default: 1)
        -A api_list, --api=api_list
                Specifies which APIs to test. api_list is a comma-separated list with the following valid values:
             phdf5    Parallel HDF5
             mpiio    MPI-I/O
             posix    POSIX
        (Default: All APIs)

        Example: --api=mpiio,phdf5 specifies that the MPI-I/O and Parallel HDF5 APIs are to be monitored.
        -B size, --block-size=size
                Specifies the block size within the transfer - buffer. (Default: 128K)

        - Block size versus transfer buffer size: The transfer buffer size - is the size of a buffer in memory. The data in that buffer is broken - into block size pieces and written to the file.

        - Transfer block size is set by the -x (or --min-xfer-size) - and -X (or --max-xfer-size) options.
        - The pattern in which the blocks are written to the file is described - in the discussion of the -I (or --interleaved) - option.
        -c, --chunk
                Creates HDF5 datasets in chunked layout. (Default: - Off)
        -C, --collective
                Use collective I/O for the MPI I/O and - Parallel HDF5 APIs.
        - (Default: Off, i.e., independent I/O)

        - If this option is set and the MPI-I/O and PHDF5 APIs are in use, all - the blocks in each transfer buffer will be written at once with an - MPI derived type.
        -d N, --num-dsets=N
                Sets the number of datasets per file. (Default: 1)
        -D debug_flags, --debug=debug_flags
                Sets the debugging level. debug_flags is a comma-separated list of debugging flags with the following valid values:
             1    Minimal debugging
             2    Moderate debugging (“not quite everything”)
             3    Extensive debugging (“everything”)
             4    All possible debugging (“the kitchen sink”)
             r    Raw data I/O throughput information
             t    Times, in addition to throughputs
             v    Verify data correctness
        - (Default: No debugging)

        - Example: --debug=2,r,t specifies to run a moderate level - of debugging while collecting raw data I/O throughput information - and verifying the correctness of the data.
        -e size, --num-bytes=size
                Specifies the number of bytes per process per dataset. - (Default: 256K)
        -F N, --num-files=N
                Specifies the number of files. (Default: 1)
        -i N, --num-iterations=N
                Sets the number of iterations to perform. (Default: - 1)
        -I, --interleaved
                Sets interleaved block I/O.
        - (Default: Contiguous block I/O)

        - Interleaved vs. Contiguous blocks in a parallel environment:
        - When contiguous blocks are written to a dataset, the dataset is divided - into m regions, where m is the number of processes - writing separate portions of the dataset. Each process then writes - data to its own region. When interleaved blocks are written to a dataset, - space for the first block of the first process is allocated in the - dataset, then space is allocated for the first block of the second - process, etc., until space has been allocated for the first block - of each process. Space is then allocated for the second block of the - first process, the second block of the second process, etc.

        - For example, in the case of a 4 process run with 1M bytes-per-process, - 256K transfer buffer size, and 64KB block size, 16 contiguous - blocks per process would be written to the file in the manner
        -     1111111111111111222222222222222233333333333333334444444444444444
        - while 16 interleaved blocks per process would be written to the file - as     1234123412341234123412341234123412341234123412341234123412341234
        - If collective I/O is turned on, all of the four blocks per transfer - buffer will be written in one collective I/O call.
        -m, --mpi-posix    Sets use of the MPI-POSIX driver for HDF5 I/O. (Default: MPI-I/O driver)
        -n, --no-fill    Specifies not to write fill values to HDF5 datasets. This option is supported only in HDF5 Release 1.6 or later.
        - (Default: Off, i.e., write fill values)
        -o file, --output=file    Sets the output file for raw data to file. (Default: None)
        -p N, --min-num-processes=N    Sets the minimum number of processes to be used. (Default: 1)
        -P N, --max-num-processes=N    Sets the maximum number of processes to be used. (Default: All MPI_COMM_WORLD processes)
        -T size, --threshold=size    Sets the threshold for alignment of objects in the HDF5 file. (Default: 1)
        -w, --write-only    Performs only write tests, not read tests. (Default: Read and write tests)
        -x size, --min-xfer-size=size    Sets the minimum transfer buffer size. (Default: 128K)
        -X size, --max-xfer-size=size    Sets the maximum transfer buffer size. (Default: 1M)
        -
      - -
      - - - -
      -
      -
      Tool Name: h5redeploy -
      Syntax: -
      h5redeploy - [help | -help] -
      h5redeploy - [-echo] - [-force] - [-prefix=dir] - [-tool=tool] - [-show] -
      Purpose: -
      Updates HDF5 compiler tools after an HDF5 software installation - in a new location. -
      Description: -
      h5redeploy updates the HDF5 compiler tools after - the HDF5 software has been installed in a new location. - -
      Options and Parameters: -
        help, -help      Prints a help message.
        -echo            Shows all the shell commands executed.
        -force           Performs the requested action without offering any prompt requesting confirmation.
        -prefix=dir      Specifies a new directory in which to find the HDF5 subdirectories lib/ and include/. (Default: current working directory)
        -tool=tool       Specifies the tool to update. tool must be in the current directory and must be writable. (Default: h5cc)
        -show            Shows all of the shell commands to be executed without actually executing them.
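      Example:
      An illustrative invocation (the installation path is hypothetical) that updates the compiler tools to point at a relocated installation without prompting for confirmation:
           h5redeploy -force -prefix=/opt/hdf5-1.6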
      - - - -
      -
      -
      Tool Name: h5cc -
      Syntax: -
      h5cc - [OPTIONS] <compile line> -
      Purpose: -
      Helper script to compile HDF5 applications. -
      Description: -
      h5cc can be used in much the same way MPIch is used - to compile an HDF5 program. It takes care of specifying where the - HDF5 header files and libraries are on the command line. -

      - h5cc supersedes all other compiler scripts in that - if you've used them to compile the HDF5 library, then - h5cc also uses those scripts. For example, when - compiling an MPIch program, you use the mpicc - script. If you've built HDF5 using MPIch, then h5cc - uses the MPIch program for compilation. -

      - Some programs use HDF5 in only a few modules. It isn't necessary - to use h5cc to compile those modules which don't use - HDF5. In fact, since h5cc is only a convenience - script, you are still able to compile HDF5 modules in the normal - way. In that case, you will have to specify the HDF5 libraries - and include paths yourself.

      - - An example of how to use h5cc to compile the program - hdf_prog, which consists of modules - prog1.c and prog2.c and uses the HDF5 - shared library, would be as follows: - -
      -        # h5cc -c prog1.c
      -        # h5cc -c prog2.c
      -        # h5cc -shlib -o hdf_prog prog1.o prog2.o
      - -
      Options and Parameters: -
        -help            Prints a help message.
        -echo            Show all the shell commands executed.
        -prefix=DIR      Use the directory DIR to find the HDF5 lib/ and include/ subdirectories. (Default: prefix specified when configuring HDF5)
        -show            Show the commands without executing them.
        -shlib           Compile using shared HDF5 libraries.
        -noshlib         Compile using static HDF5 libraries. [Default]
        <compile line>   The normal compile line options for your compiler. h5cc uses the same compiler you used to compile HDF5. Check your compiler's manual for more information on which options are needed.
      Environment Variables: -
      When set, these environment variables override some of the built-in - defaults of h5cc. - -
        HDF5_CC                    Use a different C compiler.
        HDF5_CLINKER               Use a different linker.
        HDF5_USE_SHLIB=[yes|no]    Use shared version of the HDF5 library. [Default: no]
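      For illustration (the compiler name icc is an assumption; any installed C compiler may be substituted), a single invocation can override these defaults from a POSIX shell:
           HDF5_CC=icc HDF5_CLINKER=icc h5cc -shlib -o hdf_prog prog1.o prog2.o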
      - - - -
      -
      -
      Tool Name: h5fc -
      Syntax: -
      h5fc - [OPTIONS] <compile line> -
      Purpose: -
      Helper script to compile HDF5 Fortran90 applications. -
      Description: -

      - h5fc can be used in much the same way MPIch is used - to compile an HDF5 program. It takes care of specifying where the - HDF5 header files and libraries are on the command line. -

      - h5fc supersedes all other compiler scripts in that - if you've used them to compile the HDF5 Fortran library, then - h5fc also uses those scripts. For example, when - compiling an MPIch program, you use the mpif90 - script. If you've built HDF5 using MPIch, then h5fc - uses the MPIch program for compilation. -

      - Some programs use HDF5 in only a few modules. It isn't necessary - to use h5fc to compile those modules which don't use - HDF5. In fact, since h5fc is only a convenience - script, you are still able to compile HDF5 Fortran modules in the - normal way. In that case, you will have to specify the HDF5 libraries - and include paths yourself. -

      - An example of how to use h5fc to compile the program - hdf_prog, which consists of modules - prog1.f90 and prog2.f90 - and uses the HDF5 Fortran library, would be as follows: -

      -        # h5fc -c prog1.f90
      -        # h5fc -c prog2.f90
      -        # h5fc -o hdf_prog prog1.o prog2.o
      - -
      Options and Parameters: -
        -help            Prints a help message.
        -echo            Show all the shell commands executed.
        -prefix=DIR      Use the directory DIR to find the HDF5 lib/ and include/ subdirectories. (Default: prefix specified when configuring HDF5)
        -show            Show the commands without executing them.
        <compile line>   The normal compile line options for your compiler. h5fc uses the same compiler you used to compile HDF5. Check your compiler's manual for more information on which options are needed.
      Environment Variables: -
      When set, these environment variables override some of the built-in defaults of h5fc.
        HDF5_FC         Use a different Fortran90 compiler.
        HDF5_FLINKER    Use a different linker.
      - - - -
      -
      -
      Tool Name: h5c++ -
      Syntax: -
      h5c++ - [OPTIONS] <compile line> -
      Purpose: -
      Helper script to compile HDF5 C++ applications. -
      Description: -

      - h5c++ can be used in much the same way MPIch is used - to compile an HDF5 program. It takes care of specifying where the - HDF5 header files and libraries are on the command line. -

      - h5c++ supersedes all other compiler scripts in that - if you've used one set of compiler scripts to compile the - HDF5 C++ library, then h5c++ uses those same scripts. - For example, when compiling an MPIch program, - you use the mpiCC script. -

      - Some programs use HDF5 in only a few modules. It isn't necessary - to use h5c++ to compile those modules which don't use - HDF5. In fact, since h5c++ is only a convenience - script, you are still able to compile HDF5 C++ modules in the - normal way. In that case, you will have to specify the HDF5 libraries - and include paths yourself. -

      - An example of how to use h5c++ to compile the program - hdf_prog, which consists of modules - prog1.cpp and prog2.cpp - and uses the HDF5 C++ library, would be as follows: -

      -        # h5c++ -c prog1.cpp
      -        # h5c++ -c prog2.cpp
      -        # h5c++ -o hdf_prog prog1.o prog2.o
      - -
      Options and Parameters: -
        -help            Prints a help message.
        -echo            Show all the shell commands executed.
        -prefix=DIR      Use the directory DIR to find the HDF5 lib/ and include/ subdirectories. (Default: prefix specified when configuring HDF5)
        -show            Show the commands without executing them.
        <compile line>   The normal compile line options for your compiler. h5c++ uses the same compiler you used to compile HDF5. Check your compiler's manual for more information on which options are needed.
Environment Variables:

When set, these environment variables override some of the built-in defaults
of h5c++.

    HDF5_CXX           Use a different C++ compiler.
    HDF5_CXXLINKER     Use a different linker.
      - - - - - - - - - -
      -
      - - - -
      - - - diff --git a/doc/html/Tutor/Contents.html b/doc/html/Tutor/Contents.html deleted file mode 100644 index d2584e5..0000000 --- a/doc/html/Tutor/Contents.html +++ /dev/null @@ -1,104 +0,0 @@ - - - - - - -
      -Return to HDF5 Doc Set -
      -
      - -
      - - - -Tutorial Title Page -
      - -
      -Introductory Topics -
      - - -
      -Advanced Topics - - - -
      -Additional Information - - - -
      -Full TOC -
      - - -
      -
      -Copyright, Etc.
      - - - - - diff --git a/doc/html/Tutor/ContentsAdd.html b/doc/html/Tutor/ContentsAdd.html deleted file mode 100644 index cd5d2e1..0000000 --- a/doc/html/Tutor/ContentsAdd.html +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - -
      -Return to HDF5 Doc Set -
      -
      - -
      - - - - -(Short TOC) -
      - -Tutorial Title Page -
      - - -Introductory Topics -
      - - -Advanced Topics -
      - - -Additional Information -HDF5 Utilities -- h5ls and h5dump -Glossary -References -Example Programs -
      - - -Full TOC - - -
      -Copyright, Etc.
      - - - - - diff --git a/doc/html/Tutor/ContentsAdv.html b/doc/html/Tutor/ContentsAdv.html deleted file mode 100644 index ac27c0e..0000000 --- a/doc/html/Tutor/ContentsAdv.html +++ /dev/null @@ -1,57 +0,0 @@ - - - - - - -
      -Return to HDF5 Doc Set -
      -
      - -
      - - - -(Short TOC) -
      - -Tutorial Title Page -
      - - -Introductory Topics -
      - - -Advanced Topics -Compound Datatypes -Dataspace Selection - hyperslab -Dataspace Selection - Individual Points -References to Objects -References to Dataset Regions -Chunking and Extendible Datasets -Mounting Files -Group Iteration -
      - - -Additional Information -
      - - -Full TOC -
      - - -Copyright, Etc.
      - - - - - diff --git a/doc/html/Tutor/ContentsFull.html b/doc/html/Tutor/ContentsFull.html deleted file mode 100644 index d873f82..0000000 --- a/doc/html/Tutor/ContentsFull.html +++ /dev/null @@ -1,71 +0,0 @@ - - - - - - -
      -Return to HDF5 Doc Set -
      -
      - -
      - - - - -(Short TOC) -
      - -Tutorial Title Page - -
      -Introductory Topics -Introduction -HDF5 File Organization -The HDF5 API -Creating an HDF5 File -Creating a Dataset -Reading from and Writing to a Dataset -Creating an Attribute -Creating a Group -Creating Groups Using Absolute and Relative Names -Creating Datasets in a Group - -
      -Quiz Questions -Quiz Answers - - -
      -Advanced Topics -Compound Datatypes -Dataspace Selection - Hyperslab -Dataspace Selection - Individual Points -References to Objects -References to Dataset Regions -Chunking and Extendible Datasets -Mounting Files -Group Iteration - - -
      -Additional Information -HDF5 Utilities -- h5ls and h5dump -Glossary -References -Example Programs - - -
      -Copyright, Etc.
      - - - - - diff --git a/doc/html/Tutor/ContentsIntro.html b/doc/html/Tutor/ContentsIntro.html deleted file mode 100644 index 96bd716..0000000 --- a/doc/html/Tutor/ContentsIntro.html +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - -
      -Return to HDF5 Doc Set -
      -
      - -
      - - - - -(Short TOC) -
      - -Tutorial Title Page -
      - -Introductory Topics -Introduction -HDF5 File Organization -The HDF5 API -Creating an HDF5 File -Creating a Dataset -Reading from and Writing to a Dataset -Creating an Attribute -Creating a Group -Creating Groups Using Absolute and Relative Names -Creating Datasets in a Group - -
      -Quiz Questions -Quiz Answers -
      - - -Advanced Topics -
      - - -Additional Information -
      - - -Full TOC -
      - - -Copyright, Etc.
      - - - - - diff --git a/doc/html/Tutor/Copyright.html b/doc/html/Tutor/Copyright.html deleted file mode 100644 index d488a10..0000000 --- a/doc/html/Tutor/Copyright.html +++ /dev/null @@ -1,117 +0,0 @@ - - - - - HDF5 Copyright Notice - - - - - - - -
      - -

      Copyright Notice and Statement for -
      -NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities

      -
      -

      - - -NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities -
      -Copyright 1998, 1999, 2000, 2001 by the Board of Trustees of the University of Illinois -
      -All rights reserved. -

      - -Contributors: National Center for Supercomputing Applications (NCSA) at -the University of Illinois at Urbana-Champaign (UIUC), Lawrence Livermore -National Laboratory (LLNL), Sandia National Laboratories (SNL), Los Alamos -National Laboratory (LANL), Jean-loup Gailly and Mark Adler (gzip library). -

      - -Redistribution and use in source and binary forms, with or without -modification, are permitted for any purpose (including commercial purposes) -provided that the following conditions are met: -

      - -

        -
      1. Redistributions of source code must retain the above copyright notice, -this list of conditions, and the following disclaimer. - -
      2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions, and the following disclaimer in the -documentation and/or materials provided with the distribution. - -
      3. In addition, redistributions of modified forms of the source or binary -code must carry prominent notices stating that the original code was -changed and the date of the change. - -
      4. All publications or advertising materials mentioning features or use of -this software are asked, but not required, to acknowledge that it was -developed by the National Center for Supercomputing Applications at the -University of Illinois at Urbana-Champaign and to credit the contributors. - -
      5. Neither the name of the University nor the names of the Contributors may -be used to endorse or promote products derived from this software without -specific prior written permission from the University or the Contributors. - -
      6. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND THE CONTRIBUTORS "AS IS" -WITH NO WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED. In no event -shall the University or the Contributors be liable for any damages suffered -by the users arising out of the use of this software, even if advised of -the possibility of such damage. - -
      - - - - -
Portions of HDF5 were developed with support from the University of
California, Lawrence Livermore National Laboratory (UC LLNL). The following
statement applies to those portions of the product and must be retained in
any redistribution of source code, binaries, documentation, and/or
accompanying materials:

This work was partially produced at the University of California, Lawrence
Livermore National Laboratory (UC LLNL) under contract no. W-7405-ENG-48
(Contract 48) between the U.S. Department of Energy (DOE) and The Regents of
the University of California (University) for the operation of UC LLNL.

      -DISCLAIMER: -This work was prepared as an account of work sponsored by an agency of the -United States Government. Neither the United States Government nor the -University of California nor any of their employees, makes any warranty, -express or implied, or assumes any liability or responsibility for the -accuracy, completeness, or usefulness of any information, apparatus, -product, or process disclosed, or represents that its use would not -infringe privately-owned rights. Reference herein to any specific -commercial products, process, or service by trade name, trademark, -manufacturer, or otherwise, does not necessarily constitute or imply its -endorsement, recommendation, or favoring by the United States Government -or the University of California. The views and opinions of authors -expressed herein do not necessarily state or reflect those of the United -States Government or the University of California, and shall not be used -for advertising or product endorsement purposes. -

      - - - - -
      - -
      -HDF Help Desk -
      - -Last modified: 7 June 2000 -
      - - - - diff --git a/doc/html/Tutor/Graphics/AddInfo.gif b/doc/html/Tutor/Graphics/AddInfo.gif deleted file mode 100644 index 335107f..0000000 Binary files a/doc/html/Tutor/Graphics/AddInfo.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/AdvTopics.gif b/doc/html/Tutor/Graphics/AdvTopics.gif deleted file mode 100644 index 9cea247..0000000 Binary files a/doc/html/Tutor/Graphics/AdvTopics.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/BLANK.gif b/doc/html/Tutor/Graphics/BLANK.gif deleted file mode 100644 index 03134e7..0000000 Binary files a/doc/html/Tutor/Graphics/BLANK.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/ChunkExt.gif b/doc/html/Tutor/Graphics/ChunkExt.gif deleted file mode 100644 index 52f9130..0000000 Binary files a/doc/html/Tutor/Graphics/ChunkExt.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/CompDTypes.gif b/doc/html/Tutor/Graphics/CompDTypes.gif deleted file mode 100644 index 0549b32..0000000 Binary files a/doc/html/Tutor/Graphics/CompDTypes.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Copy.gif b/doc/html/Tutor/Graphics/Copy.gif deleted file mode 100644 index 7df2e6a..0000000 Binary files a/doc/html/Tutor/Graphics/Copy.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/CreateAttr.gif b/doc/html/Tutor/Graphics/CreateAttr.gif deleted file mode 100644 index 82c7f72..0000000 Binary files a/doc/html/Tutor/Graphics/CreateAttr.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/CreateDset1.gif b/doc/html/Tutor/Graphics/CreateDset1.gif deleted file mode 100644 index 2641d88..0000000 Binary files a/doc/html/Tutor/Graphics/CreateDset1.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/CreateDset2.gif b/doc/html/Tutor/Graphics/CreateDset2.gif deleted file mode 100644 index 294c0a5..0000000 Binary files a/doc/html/Tutor/Graphics/CreateDset2.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/CreateFile.gif b/doc/html/Tutor/Graphics/CreateFile.gif deleted file mode 100644 index e07d0aa..0000000 Binary files a/doc/html/Tutor/Graphics/CreateFile.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/CreateGrp1.gif b/doc/html/Tutor/Graphics/CreateGrp1.gif deleted file mode 100644 index 66be2fd..0000000 Binary files a/doc/html/Tutor/Graphics/CreateGrp1.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/CreateGrp2.gif b/doc/html/Tutor/Graphics/CreateGrp2.gif deleted file mode 100644 index 93a5c87..0000000 Binary files a/doc/html/Tutor/Graphics/CreateGrp2.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Examples.gif b/doc/html/Tutor/Graphics/Examples.gif deleted file mode 100644 index d6b3ac6..0000000 Binary files a/doc/html/Tutor/Graphics/Examples.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/FileOrg.gif b/doc/html/Tutor/Graphics/FileOrg.gif deleted file mode 100644 index a0f812d..0000000 Binary files a/doc/html/Tutor/Graphics/FileOrg.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/FullTOC1.gif b/doc/html/Tutor/Graphics/FullTOC1.gif deleted file mode 100644 index 9a7a810..0000000 Binary files a/doc/html/Tutor/Graphics/FullTOC1.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/FullTOC2.gif b/doc/html/Tutor/Graphics/FullTOC2.gif deleted file mode 100644 index 651e0c7..0000000 Binary files a/doc/html/Tutor/Graphics/FullTOC2.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Glossary.gif b/doc/html/Tutor/Graphics/Glossary.gif deleted file mode 100644 index 157208a..0000000 Binary files 
a/doc/html/Tutor/Graphics/Glossary.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/H5API.gif b/doc/html/Tutor/Graphics/H5API.gif deleted file mode 100644 index 7d5c8a6..0000000 Binary files a/doc/html/Tutor/Graphics/H5API.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Intro.gif b/doc/html/Tutor/Graphics/Intro.gif deleted file mode 100644 index a9d299b..0000000 Binary files a/doc/html/Tutor/Graphics/Intro.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/IntroTopics.gif b/doc/html/Tutor/Graphics/IntroTopics.gif deleted file mode 100644 index 384d7a7..0000000 Binary files a/doc/html/Tutor/Graphics/IntroTopics.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Iterate.gif b/doc/html/Tutor/Graphics/Iterate.gif deleted file mode 100644 index 0dc68d5..0000000 Binary files a/doc/html/Tutor/Graphics/Iterate.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Makefile.am b/doc/html/Tutor/Graphics/Makefile.am deleted file mode 100644 index 6e3a60e..0000000 --- a/doc/html/Tutor/Graphics/Makefile.am +++ /dev/null @@ -1,24 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir = $(docdir)/hdf5/Tutor/Graphics - -# Public doc files (to be installed)... -localdoc_DATA=AddInfo.gif AdvTopics.gif BLANK.gif ChunkExt.gif CompDTypes.gif \ - Copy.gif CreateAttr.gif CreateDset1.gif CreateDset2.gif \ - CreateFile.gif CreateGrp1.gif CreateGrp2.gif Examples.gif \ - FileOrg.gif FullTOC1.gif FullTOC2.gif Glossary.gif H5API.gif \ - Intro.gif IntroTopics.gif Iterate.gif MountFile.gif Quiz.gif \ - QuizAns.gif RdWrDataset.gif RefObject.gif RefRegion.gif \ - References.gif SelectElemCp.gif SelectHyp.gif TOC.gif \ - TOCFull.gif TOCShort.gif TitlePg.gif Utilities.gif diff --git a/doc/html/Tutor/Graphics/Makefile.in b/doc/html/Tutor/Graphics/Makefile.in deleted file mode 100644 index df5259b..0000000 --- a/doc/html/Tutor/Graphics/Makefile.in +++ /dev/null @@ -1,493 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../../.. 
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/Tutor/Graphics -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. -ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = 
@F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# 
so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/Tutor/Graphics - -# Public doc files (to be installed)... -localdoc_DATA = AddInfo.gif AdvTopics.gif BLANK.gif ChunkExt.gif CompDTypes.gif \ - Copy.gif CreateAttr.gif CreateDset1.gif CreateDset2.gif \ - CreateFile.gif CreateGrp1.gif CreateGrp2.gif Examples.gif \ - FileOrg.gif FullTOC1.gif FullTOC2.gif Glossary.gif H5API.gif \ - Intro.gif IntroTopics.gif Iterate.gif MountFile.gif Quiz.gif \ - QuizAns.gif RdWrDataset.gif RefObject.gif RefRegion.gif \ - References.gif SelectElemCp.gif SelectHyp.gif TOC.gif \ - TOCFull.gif TOCShort.gif TitlePg.gif Utilities.gif - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/Tutor/Graphics/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/Tutor/Graphics/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/doc/html/Tutor/Graphics/MountFile.gif b/doc/html/Tutor/Graphics/MountFile.gif deleted file mode 100644 index fb476df..0000000 Binary files a/doc/html/Tutor/Graphics/MountFile.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Quiz.gif b/doc/html/Tutor/Graphics/Quiz.gif deleted file mode 100644 index 5bba11e..0000000 Binary files a/doc/html/Tutor/Graphics/Quiz.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/QuizAns.gif b/doc/html/Tutor/Graphics/QuizAns.gif deleted file mode 100644 index 6b73566..0000000 Binary files a/doc/html/Tutor/Graphics/QuizAns.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/RdWrDataset.gif b/doc/html/Tutor/Graphics/RdWrDataset.gif deleted file mode 100644 index 962fed1..0000000 Binary files a/doc/html/Tutor/Graphics/RdWrDataset.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/RefObject.gif b/doc/html/Tutor/Graphics/RefObject.gif deleted file mode 100755 index ae9dc05..0000000 Binary files a/doc/html/Tutor/Graphics/RefObject.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/RefRegion.gif b/doc/html/Tutor/Graphics/RefRegion.gif deleted file mode 100755 index 250039f..0000000 Binary files a/doc/html/Tutor/Graphics/RefRegion.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/References.gif b/doc/html/Tutor/Graphics/References.gif deleted file mode 100644 index 0d94116..0000000 Binary files a/doc/html/Tutor/Graphics/References.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/SelectElemCp.gif b/doc/html/Tutor/Graphics/SelectElemCp.gif deleted file mode 100644 index ed8e976..0000000 Binary files a/doc/html/Tutor/Graphics/SelectElemCp.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/SelectHyp.gif b/doc/html/Tutor/Graphics/SelectHyp.gif deleted file mode 100644 index 29ac707..0000000 Binary files a/doc/html/Tutor/Graphics/SelectHyp.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/TOC.gif b/doc/html/Tutor/Graphics/TOC.gif deleted file mode 100644 index 0bc3251..0000000 Binary files a/doc/html/Tutor/Graphics/TOC.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/TOCFull.gif b/doc/html/Tutor/Graphics/TOCFull.gif deleted file mode 100644 index bb1e982..0000000 Binary files a/doc/html/Tutor/Graphics/TOCFull.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/TOCShort.gif b/doc/html/Tutor/Graphics/TOCShort.gif deleted file mode 100644 index a17aaee..0000000 Binary files a/doc/html/Tutor/Graphics/TOCShort.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/TitlePg.gif b/doc/html/Tutor/Graphics/TitlePg.gif deleted file mode 100644 index 5f9a633..0000000 Binary files a/doc/html/Tutor/Graphics/TitlePg.gif and /dev/null differ diff --git a/doc/html/Tutor/Graphics/Utilities.gif b/doc/html/Tutor/Graphics/Utilities.gif deleted file mode 100644 index 62e1d78..0000000 Binary files a/doc/html/Tutor/Graphics/Utilities.gif and /dev/null differ diff --git a/doc/html/Tutor/Makefile.am b/doc/html/Tutor/Makefile.am deleted file mode 100644 index ed7ab7a..0000000 --- a/doc/html/Tutor/Makefile.am +++ /dev/null @@ -1,25 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -SUBDIRS=Graphics examples -localdocdir = $(docdir)/hdf5/Tutor - -# Public doc files (to be installed)... 
-localdoc_DATA=Contents.html ContentsAdd.html ContentsAdv.html \ - ContentsFull.html ContentsIntro.html Copyright.html answers.html \ - api.html bighdf2sp.JPG compound.html crtatt.html crtdat.html \ - crtfile.html crtgrp.html crtgrpar.html crtgrpd.html extend.html \ - fileorg.html glossary.html img001.gif img002.gif img003.gif \ - img004.gif img005.gif index.html intro.html iterate.html \ - mount.html questions.html rdwt.html references.html reftoobj.html \ - reftoreg.html select.html selectc.html title.html util.html diff --git a/doc/html/Tutor/Makefile.in b/doc/html/Tutor/Makefile.in deleted file mode 100644 index 5a16306..0000000 --- a/doc/html/Tutor/Makefile.in +++ /dev/null @@ -1,651 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/Tutor -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ - html-recursive info-recursive install-data-recursive \ - install-exec-recursive install-info-recursive \ - install-recursive installcheck-recursive installdirs-recursive \ - pdf-recursive ps-recursive uninstall-info-recursive \ - uninstall-recursive -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -ETAGS = etags -CTAGS = ctags -DIST_SUBDIRS = $(SUBDIRS) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the 
build. -ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -SUBDIRS = Graphics examples -localdocdir = $(docdir)/hdf5/Tutor - -# Public doc files (to be installed)... 
-localdoc_DATA = Contents.html ContentsAdd.html ContentsAdv.html \ - ContentsFull.html ContentsIntro.html Copyright.html answers.html \ - api.html bighdf2sp.JPG compound.html crtatt.html crtdat.html \ - crtfile.html crtgrp.html crtgrpar.html crtgrpd.html extend.html \ - fileorg.html glossary.html img001.gif img002.gif img003.gif \ - img004.gif img005.gif index.html intro.html iterate.html \ - mount.html questions.html rdwt.html references.html reftoobj.html \ - reftoreg.html select.html selectc.html title.html util.html - -all: all-recursive - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/Tutor/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/Tutor/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -# This directory's subdirectories are mostly independent; you can cd -# into them and run `make' without going through this Makefile. -# To change the values of `make' variables: instead of editing Makefiles, -# (1) if the variable is set in `config.status', edit `config.status' -# (which will cause the Makefiles to be regenerated when you run `make'); -# (2) otherwise, pass the desired values on the `make' command line. 
-$(RECURSIVE_TARGETS): - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - target=`echo $@ | sed s/-recursive//`; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - dot_seen=yes; \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done; \ - if test "$$dot_seen" = "no"; then \ - $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ - fi; test -z "$$fail" - -mostlyclean-recursive clean-recursive distclean-recursive \ -maintainer-clean-recursive: - @failcom='exit 1'; \ - for f in x $$MAKEFLAGS; do \ - case $$f in \ - *=* | --[!k]*);; \ - *k*) failcom='fail=yes';; \ - esac; \ - done; \ - dot_seen=no; \ - case "$@" in \ - distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ - *) list='$(SUBDIRS)' ;; \ - esac; \ - rev=''; for subdir in $$list; do \ - if test "$$subdir" = "."; then :; else \ - rev="$$subdir $$rev"; \ - fi; \ - done; \ - rev="$$rev ."; \ - target=`echo $@ | sed s/-recursive//`; \ - for subdir in $$rev; do \ - echo "Making $$target in $$subdir"; \ - if test "$$subdir" = "."; then \ - local_target="$$target-am"; \ - else \ - local_target="$$target"; \ - fi; \ - (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ - || eval $$failcom; \ - done && test -z "$$fail" -tags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ - done -ctags-recursive: - list='$(SUBDIRS)'; for subdir in $$list; do \ - test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ - done - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ - include_option=--etags-include; \ - empty_fix=.; \ - else \ - include_option=--include; \ - empty_fix=; \ - fi; \ - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test ! 
-f $$subdir/TAGS || \ - tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ - fi; \ - done; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done - list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(mkdir_p) "$(distdir)/$$subdir" \ - || exit 1; \ - distdir=`$(am__cd) $(distdir) && pwd`; \ - top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ - (cd $$subdir && \ - $(MAKE) $(AM_MAKEFLAGS) \ - top_distdir="$$top_distdir" \ - distdir="$$distdir/$$subdir" \ - distdir) \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-recursive -all-am: Makefile $(DATA) -installdirs: installdirs-recursive -installdirs-am: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-recursive -install-exec: install-exec-recursive -install-data: install-data-recursive -uninstall: uninstall-recursive - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-recursive -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - 
-maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-recursive - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-recursive - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool \ - distclean-tags - -dvi: dvi-recursive - -dvi-am: - -html: html-recursive - -info: info-recursive - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-recursive - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-recursive - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-recursive - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-recursive - -pdf-am: - -ps: ps-recursive - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -uninstall-info: uninstall-info-recursive - -.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \ - clean clean-generic clean-libtool clean-recursive ctags \ - ctags-recursive distclean distclean-generic distclean-libtool \ - distclean-recursive distclean-tags distdir dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-exec install-exec-am install-info \ - install-info-am install-localdocDATA install-man install-strip \ - installcheck installcheck-am installdirs installdirs-am \ - maintainer-clean maintainer-clean-generic \ - maintainer-clean-recursive mostlyclean mostlyclean-generic \ - mostlyclean-libtool mostlyclean-recursive pdf pdf-am ps ps-am \ - tags tags-recursive uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/Tutor/answers.html b/doc/html/Tutor/answers.html deleted file mode 100644 index e0cd15e..0000000 --- a/doc/html/Tutor/answers.html +++ /dev/null @@ -1,322 +0,0 @@ - -HDF5 Tutorial - Introductory Topics Quiz with Answers - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Introductory Topics Quiz - with Answers -

      - -
      - - -

Section 2: HDF5 File Organization

1. Name and describe the two primary objects that can be stored in an HDF5
   file.

   Answers:
   Group: A grouping structure containing zero or more HDF5 objects,
   together with supporting metadata.
   Dataset: A multidimensional array of data elements, together with
   supporting metadata.

2. What is an attribute?

   Answer:
   An HDF5 attribute is a user-defined HDF5 structure that provides extra
   information about an HDF5 object.

3. Give the path name for an object called harry that is a member of a
   group called dick, which, in turn, is a member of the root group.

   Answer:
   /dick/harry

      Section 3: The HDF5 API

      -
        - -
      1. Describe the purpose of each of the following HDF5 APIs: - - - H5A, H5D, H5E, H5F, H5G, H5T, H5Z - - -
        -
        Answers: - - H5A: Attribute access and manipulation routines
        - H5D: Dataset access and manipulation routines
        - H5E: Error handling routines
        - H5F: File access routines
        - H5G: Routines for creating and operating on groups
        - H5T: Routines for creating and manipulating the - datatypes of dataset elements
        - H5Z: Data compression routines -
        -
        - - -
      -

      Section 4: Creating an HDF5 File

      -
        - -
      1. What two HDF5 routines must be called to create an HDF5 file? - -
        -
        Answer: -
        H5Fcreate and H5Fclose. -
        - -

        -

      2. What include file must be included in any file that uses the HDF5 library? - -
        -
        Answer: -
        hdf5.h must be included because it contains definitions - and declarations used by the library. -
        - -

        -

      3. An HDF5 file is never completely empty because as soon as it is created, - it automatically contains a certain primary object. What is that object? - -
        -
        Answer: -
        The root group. -
        - - -
      -

      Section 5: Creating a Dataset

      -
        - -
      1. Name and describe two major datatype categories. - -
        -
        Answers: -
        Atomic datatype: - An atomic datatype cannot be decomposed into smaller units at the - API level. -
        - Compound datatype: - A compound datatype is a collection of atomic and compound datatypes, - or small arrays of such types. -
        - -

        -

      2. List the HDF5 atomic datatypes. Give an example of a predefined datatype. - -
        -
        Answers: -
        There are six HDF5 atomic datatypes: integer, floating point, - date and time, character string, bit field, and opaque. - Examples of predefined datatypes include the following: - - H5T_IEEE_F32LE - - 4-byte little-endian, IEEE floating point
        - H5T_NATIVE_INT - - native integer -
        -
        - -

        -

      3. What does the dataspace describe? What are the major characteristics of - the simple dataspace? - -
        -
        Answers: -
        The dataspace describes the dimensionality of the dataset. - A simple dataspace is characterized by its rank and dimension sizes. -
        - -

        -

      4. What information needs to be passed to the H5Dcreate function, i.e., - what information is needed to describe a dataset at creation time? - -
        -
        Answer: -
        The dataset location, name, dataspace, datatype, and dataset - creation property list. -
        - - -
      -

      Section 6: Reading from and Writing to a Dataset

      -
        - -
      1. What are six pieces of information which need to be specified for - reading and writing a dataset? - -
        -
        Answer: -
        The dataset identifier, the dataset's datatype and dataspace in - memory, the dataspace in the file, the dataset transfer property - list, and a data buffer. -
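For illustration only, a call sequence supplying all six pieces of information might look like the following C fragment (a sketch, not part of the quiz: dataset_id, data_out, data_in, and status are assumed to be declared elsewhere; H5S_ALL selects the entire dataspace in both memory and the file, and H5P_DEFAULT is the default transfer property list):

        /* write the whole dataset, then read it back */
        status = H5Dwrite (dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
                           H5P_DEFAULT, data_out);
        status = H5Dread  (dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
                           H5P_DEFAULT, data_in);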
        - -

        -

      2. Why are both the memory dataspace and file dataspace needed for - read/write operations, while only the memory datatype is required? - -
        -
        Answer: -
        A dataset's file datatype is not required for a read/write operation - because the file datatype is specified when the dataset is created - and cannot be changed. Both file and memory dataspaces are required - for dataset subsetting and for performing partial I/O operations. -
        - -

        -

      3. What does the line -
            - DATASPACE { SIMPLE (4 , 6 ) / ( 4 , 6 ) } -
        in Figure 6.1 mean? - -
        -
        Answer: -
        It means that the dataset dset has a simple dataspace - with the current dimensions (4,6) and the maximum size of the - dimensions (4,6). -
        - - -
      -

      Section 7: Creating an Attribute

      -
        - -
      1. What is an attribute? - -
        -
        Answer: -
        An attribute is a dataset attached to an object. It describes the - nature and/or the intended usage of the object. -
        - -

        -

      2. Can partial I/O operations be performed on attributes? - -
        -
        Answer: -
        No. -
        - - -
      -

      Section 8: Creating a Group

      -
        - -
      1. What are the two primary objects that can be included in a group? - -
        -
        Answer: -
        A group and a dataset. -
        - - -
      -

      Section 9: Creating Groups Using Absolute and Relative Names

      -
        - -
      1. Group names can be specified in two ways. What are these two types - of group names? - -
        -
        Answer: -
        Relative and absolute. -
        - -

        -

      2. You have a dataset named moo in the group - boo, which is in the group foo, - which, in turn, is in the root group. - How would you specify an absolute name to access this dataset? - -
        -
        Answer: -
        /foo/boo/moo -
        - - -
      -

      Section 10: Creating Datasets in Groups

      -
        - -
      1. Describe a way to access the dataset moo described in - the previous section (Section 9, question 2) using a - relative name. - Describe a way to access the same dataset using an absolute name. - -
        -
        Answers: -
          -
        1. Access the group /foo and get the group ID. - Access the group boo using the group ID obtained - in Step 1. - Access the dataset moo using the group ID obtained - in Step 2. -
          -gid = H5Gopen (file_id, "/foo");         /* absolute path */
          -gid1 = H5Gopen (gid, "boo");             /* relative path */
          -did = H5Dopen (gid1, "moo");              /* relative path */  
          - -
        2. Access the group /foo and get the group ID. - Access the dataset boo/moo with the group ID - just obtained. -
          -gid = H5Gopen (file_id, "/foo");         /* absolute path */
          -did = H5Dopen (gid, "boo/moo");           /* relative path */  
          - -
        3. Access the dataset with an absolute path. -
          -did = H5Dopen (file_id, "/foo/boo/moo");  /* absolute path */  
          -
        -
        - -
      - - - -


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/api.html b/doc/html/Tutor/api.html deleted file mode 100644 index a365e67..0000000 --- a/doc/html/Tutor/api.html +++ /dev/null @@ -1,151 +0,0 @@ - -The HDF5 API - - - - - - - - - [ HDF5 Tutorial Top ] -

      -The HDF5 API -

      - - -
      - - -

      -The HDF5 library provides several interfaces, or APIs. -These APIs provide routines for creating, accessing, and manipulating -HDF5 files and objects. -

      -The library itself is implemented in C. To facilitate the work of -FORTRAN90 and Java programmers, HDF5 function wrappers have been developed -in each of these languages. -At the time of this writing, a set of C++ wrappers is in development. -This tutorial discusses the use of the C functions and the FORTRAN wrappers. -

      -All C routines in the HDF5 library begin with a prefix of the form H5*, -where * is one or two uppercase letters indicating the type of object on which the -function operates. -The FORTRAN wrappers come in the form of subroutines that begin with -h5 and end with _f. The APIs are listed below: -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      -
      API
      -
      -
      DESCRIPTION
      -
      -
      H5
      -
      Library Functions: general-purpose H5 functions
      -
      H5A
      -
      Annotation Interface: attribute access and manipulation - routines
      -
      H5D
      -
      Dataset Interface: dataset access and manipulation - routines
      -
      H5E
      -
      Error Interface: error handling routines
      -
      H5F
      -
      File Interface: file access routines
      -
      H5G
      -
      Group Interface: group creation and operation routines
      -
      H5I
      -
      Identifier Interface: identifier routines
      -
      H5P
      -
      Property List Interface: object property list manipulation - routines
      -
      H5R
      -
      Reference Interface: reference routines
      -
      H5S
      -
      Dataspace Interface: dataspace definition and access - routines
      -
      H5T
      -
      Datatype Interface: datatype creation and manipulation - routines
      -
      H5Z
      -
      Compression Interface: compression routine(s)
      - - - -
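For example, the C routine that opens an existing file belongs to the File Interface and therefore carries the H5F prefix, while its FORTRAN counterpart is the subroutine h5fopen_f. A small hypothetical C fragment (the file name is arbitrary):

        hid_t file_id;

        /* H5F prefix: a file access routine (FORTRAN wrapper: h5fopen_f)  */
        file_id = H5Fopen ("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

        /* H5F prefix again when closing (FORTRAN wrapper: h5fclose_f)     */
        H5Fclose (file_id);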


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/bighdf2sp.JPG b/doc/html/Tutor/bighdf2sp.JPG deleted file mode 100644 index aad590e..0000000 Binary files a/doc/html/Tutor/bighdf2sp.JPG and /dev/null differ diff --git a/doc/html/Tutor/compound.html b/doc/html/Tutor/compound.html deleted file mode 100644 index 93df933..0000000 --- a/doc/html/Tutor/compound.html +++ /dev/null @@ -1,234 +0,0 @@ - -HDF5 Tutorial - Compound Datatypes - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Compound Datatypes -

      - -
      - - -

      Contents:

      - -
      - -

      Creating Compound Datatypes

      -A compound datatype is similar to a struct in C or a common block in -FORTRAN. It is a collection of one or more atomic types or small arrays of -such types. To create and use a compound datatype you need to be familiar -with various properties of the compound datatype: -
        -
      • It is of class compound. -
      • It has a fixed total size, in bytes. -
      • It consists of zero or more members (defined in any order) with - unique names and occupying non-overlapping regions within the datum. -
      • Each member has its own datatype. -
      • Each member is referenced by an index number between zero and N-1, - where N is the number of members in the compound datatype. -
      • Each member has a name which is unique among its siblings in a - compound datatype. -
      • Each member has a fixed byte offset, which locates the first byte - (smallest byte address) of that member in the compound datatype. -
      • Each member can be a small array of up to four dimensions. -
      -Properties of members of a compound datatype are defined when the -member is added to the compound datatype and cannot be subsequently modified. -

      -Compound datatypes must be built out of other datatypes. First, one -creates an empty compound datatype and specifies its total size. Then -members are added to the compound datatype in any order. - -

      Programming Example

      -
      -

      Description

      - -This example shows how to create a compound datatype, write an array -to the file which uses the compound datatype, and read back subsets of -the members. -
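The complete example program is distributed with the tutorial; the write portion of it reduces to roughly the sketch below. This is a sketch only: the struct name s1_t, the variable names, and the loop filling the records are illustrative, the read-back of individual fields is omitted, and no error checking is shown. The member, dataset, and file names match the h5dump output reproduced under File Contents below.

    #include "hdf5.h"
    #define LENGTH 10

    typedef struct s1_t {              /* one record of the array */
        int    a;
        float  b;
        double c;
    } s1_t;

    int main(void)
    {
        s1_t    s1[LENGTH];
        hsize_t dim[] = {LENGTH};
        hid_t   file, space, s1_tid, dataset;
        int     i;

        for (i = 0; i < LENGTH; i++) {     /* fill the records */
            s1[i].a = i;
            s1[i].b = (float)(i * i);
            s1[i].c = 1.0 / (i + 1);
        }

        /* Build the compound datatype: create an empty compound type of
           the right total size, then insert one member at a time, using
           HOFFSET to compute each member's byte offset in the struct.   */
        s1_tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t));
        H5Tinsert (s1_tid, "a_name", HOFFSET(s1_t, a), H5T_NATIVE_INT);
        H5Tinsert (s1_tid, "b_name", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT);
        H5Tinsert (s1_tid, "c_name", HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE);

        /* Create the file and a 1-D dataset that uses the compound type. */
        file    = H5Fcreate ("SDScompound.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
                             H5P_DEFAULT);
        space   = H5Screate_simple (1, dim, NULL);
        dataset = H5Dcreate (file, "ArrayOfStructures", s1_tid, space,
                             H5P_DEFAULT);
        H5Dwrite (dataset, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, s1);

        H5Tclose (s1_tid);
        H5Sclose (space);
        H5Dclose (dataset);
        H5Fclose (file);
        return 0;
    }

Note how HOFFSET locates each member within the struct; the macro is described in the Remarks below.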

      -

      -The program outputs the following: -
      -
      -Field c : 
      -1.0000 0.5000 0.3333 0.2500 0.2000 0.1667 0.1429 0.1250 0.1111 0.1000 
      -
      -Field a : 
      -0 1 2 3 4 5 6 7 8 9 
      -
      -Field b : 
      -0.0000 1.0000 4.0000 9.0000 16.0000 25.0000 36.0000 49.0000 64.0000 81.0000 
      -
      -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
      -
      - - -

      Remarks

      -
        -
      • H5Tcreate creates a new datatype of the specified class with -the specified number of bytes. -
        -    hid_t H5Tcreate ( H5T_class_t class, size_t size ) 
        -
        -
          -
        • The class parameter specifies the datatype to create. -Currently only the H5T_COMPOUND datatype class is supported with this -function. -
        • The size parameter specifies the number of bytes in the -datatype to create. -
        -

        -

      • H5Tinsert adds a member to the compound datatype specified by -type_id. -
        -   herr_t H5Tinsert ( hid_t type_id, const char * name, off_t offset, 
        -                      hid_t field_id ) 
        -
        -
          -
        • The type_id parameter is the identifier of the compound datatype -to modify. -
        • The name parameter is the name of the field to insert. The new -member name must be unique within a compound datatype. -
        • The offset parameter is the offset in the memory structure of -the field to insert. - -The library defines the HOFFSET macro to compute the offset of a member within -a struct: -
          -  HOFFSET ( s, m ) 
          -
          -This macro computes the offset of member m within a struct -variable s. - -
        • The field_id parameter is the datatype identifier of the -field to insert. -
        -

        -

      • H5Tclose releases a datatype. -
        -   herr_t H5Tclose ( hid_t type_id ) 
        -
        -The type_id parameter is the identifier of the datatype to release. -
      -
      -

      File Contents

      - -
      -HDF5 "SDScompound.h5" {
      -GROUP "/" {
      -   DATASET "ArrayOfStructures" {
      -      DATATYPE {
      -         H5T_STD_I32BE "a_name";
      -         H5T_IEEE_F32BE "b_name";
      -         H5T_IEEE_F64BE "c_name";
      -      }
      -      DATASPACE { SIMPLE ( 10 ) / ( 10 ) }
      -      DATA {
      -         {
      -            [ 0 ],
      -            [ 0 ],
      -            [ 1 ]
      -         },
      -         {
      -            [ 1 ],
      -            [ 1 ],
      -            [ 0.5 ]
      -         },
      -         {
      -            [ 2 ],
      -            [ 4 ],
      -            [ 0.333333 ]
      -         },
      -         {
      -            [ 3 ],
      -            [ 9 ],
      -            [ 0.25 ]
      -         },
      -         {
      -            [ 4 ],
      -            [ 16 ],
      -            [ 0.2 ]
      -         },
      -         {
      -            [ 5 ],
      -            [ 25 ],
      -            [ 0.166667 ]
      -         },
      -         {
      -            [ 6 ],
      -            [ 36 ],
      -            [ 0.142857 ]
      -         },
      -         {
      -            [ 7 ],
      -            [ 49 ],
      -            [ 0.125 ]
      -         },
      -         {
      -            [ 8 ],
      -            [ 64 ],
      -            [ 0.111111 ]
      -         },
      -         {
      -            [ 9 ],
      -            [ 81 ],
      -            [ 0.1 ]
      -         }
      -      }
      -   }
      -}
      -}
      -
      - - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/crtatt.html b/doc/html/Tutor/crtatt.html deleted file mode 100644 index 3a4d374..0000000 --- a/doc/html/Tutor/crtatt.html +++ /dev/null @@ -1,343 +0,0 @@ - -HDF5 Tutorial - Creating an Attribute - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Creating an Attribute -

      - -
      - - -

      Contents:

      - -
      - -

      What is an Attribute?

      -

      -Attributes are small datasets that can be used to describe the nature and/or -the intended usage of the object they are attached to. In this section, we -show how to create, read, and write an attribute. -

      -

      -

      Creating an attribute

      -

      - Creating an attribute is similar to creating a dataset. To create an - attribute, the application must specify the object which the attribute is - attached to, the datatype and dataspace of the attribute data, - and the attribute creation property list. -

      - The steps to create an attribute are as follows: -

        -
      1. Obtain the object identifier that the attribute is to be attached to. -
      2. Define the characteristics of the attribute and specify the - attribute creation property list. -
          -
        • Define the datatype. -
        • Define the dataspace. -
        • Specify the attribute creation property list. -
        -
      3. Create the attribute. -
      4. Close the attribute and datatype, dataspace, and - attribute creation property list, if necessary. -
      -

      - To create and close an attribute, the calling program must use -H5Acreate/h5acreate_f and -H5Aclose/h5aclose_f. For example: -

      -C: -

      -     attr_id = H5Acreate (dset_id, attr_name, type_id, space_id, creation_prp);
      -     status = H5Aclose (attr_id);
      -
      -FORTRAN: -
      -     CALL h5acreate_f (dset_id, attr_nam, type_id, space_id, attr_id, &
      -                       hdferr, creation_prp=creat_plist_id)
      -          or
      -     CALL h5acreate_f (dset_id, attr_nam, type_id, space_id, attr_id, hdferr)
      -
      -     CALL h5aclose_f (attr_id, hdferr)
      -
      - -

      Reading/Writing an attribute

      -

      - Attributes may only be read or written as an entire object; no partial I/O is - supported. Therefore, to perform I/O operations on an attribute, the - application needs only to specify the attribute and the attribute's memory - datatype. -

      - The steps to read or write an attribute are as follows. -

        -
      1. Obtain the attribute identifier. -
      2. Specify the attribute's memory datatype. -
      3. Perform the desired operation. -
      4. Close the memory datatype if necessary. -
      -

      -To read and/or write an attribute, the calling program must contain the -H5Aread/h5aread_f and/or -H5Awrite/h5awrite_f routines. For example: -

      -C: -

      -    status = H5Aread (attr_id, mem_type_id, buf);
      -    status = H5Awrite (attr_id, mem_type_id, buf);
      -
      -FORTRAN: -
      -    CALL h5awrite_f (attr_id, mem_type_id, buf, hdferr)  
      -    CALL h5aread_f (attr_id, mem_type_id, buf, hdferr)
      -
      -

      -

      Programming Example

      -
      -

      Description

      -This example shows how to create and write a dataset attribute. -It opens an existing file dset.h5 in C -(dsetf.h5 in FORTRAN), -obtains the identifier of the dataset /dset, -defines the attribute's dataspace, creates the dataset attribute, writes -the attribute, and then closes the attribute's dataspace, attribute, dataset, -and file.
      -
      - -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. - - - -
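In outline, the C version of this example reduces to the following sketch (identifier names are illustrative and error checking is omitted; the attribute name, values, and dataspace match the dump shown in Fig. 7.1a below):

    #include "hdf5.h"

    int main(void)
    {
        hid_t   file_id, dataset_id, attr_space, attr_id;
        hsize_t dims = 2;
        int     attr_data[2] = {100, 200};     /* values shown in Fig. 7.1 */

        /* Open the existing file and the dataset the attribute belongs to. */
        file_id    = H5Fopen ("dset.h5", H5F_ACC_RDWR, H5P_DEFAULT);
        dataset_id = H5Dopen (file_id, "/dset");

        /* Create the dataspace for a 1-D attribute of two integers,
           create the attribute, and write the data to it.                  */
        attr_space = H5Screate_simple (1, &dims, NULL);
        attr_id    = H5Acreate (dataset_id, "attr", H5T_STD_I32BE,
                                attr_space, H5P_DEFAULT);
        H5Awrite (attr_id, H5T_NATIVE_INT, attr_data);

        /* Close everything in the reverse order of creation.               */
        H5Aclose (attr_id);
        H5Sclose (attr_space);
        H5Dclose (dataset_id);
        H5Fclose (file_id);
        return 0;
    }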

      Remarks

      -
        -
      • H5Acreate/h5acreate_f creates an attribute - which is attached to the object specified by the first parameter, - and returns an identifier. -

        -C: -

        -  hid_t H5Acreate (hid_t obj_id, const char *name, hid_t type_id, 
        -                   hid_t space_id, hid_t creation_prp) 
        -
        -FORTRAN: -
        -  h5acreate_f (obj_id, name, type_id, space_id, attr_id, &
        -               hdferr, creation_prp) 
        -
        -            obj_id        INTEGER(HID_T)
        -            name          CHARACTER(LEN=*)
        -            type_id       INTEGER(HID_T)
        -            space_id      INTEGER(HID_T)
        -            attr_id       INTEGER(HID_T)
        -            hdferr        INTEGER
        -                          (Possible values: 0 on success and -1 on failure)
        -            creation_prp  INTEGER(HID_T), OPTIONAL
        -
        -
        -
          -
        • The obj_id parameter is the identifier of the object that - the attribute is attached to. -

          -

        • The name parameter is the name of the attribute to create. -

          -

        • The type_id parameter is the identifier of the - attribute's datatype. -

          -

        • The space_id parameter is the identifier of the attribute's - dataspace. -

          -

        • The creation_prp parameter is the creation property list - identifier. - H5P_DEFAULT in C (H5P_DEFAULT_F in FORTRAN) - specifies the default creation property list. - This parameter is optional in FORTRAN; when it is omitted, - the default creation property list is used. -

          -

        • In FORTRAN, the return code for this call is returned in hdferr: - 0 if successful, -1 if not. The attribute identifier is returned - in attr_id. In C, the function returns the - attribute identifier if successful and a negative value if not. - - -
        -

        -

      • H5Awrite/h5awrite_f writes the entire attribute, - and returns the status of the write. -

        -C: -

        -  herr_t H5Awrite (hid_t attr_id, hid_t mem_type_id, void *buf) 
        -
        -FORTRAN: -
        -  h5awrite_f (attr_id, mem_type_id, buf, hdferr)   
        -
        -            attr_id     INTEGER(HID_T)
        -            memtype_id  INTEGER(HID_T)
        -            buf         TYPE(VOID)
        -            hdferr      INTEGER
        -                        (Possible values: 0 on success and -1 on failure)
        -
        -
        -
          -
        • The attr_id parameter is the identifier of the attribute - to write. -

          -

        • The mem_type_id parameter is the identifier of the - attribute's memory datatype. -

          -

        • The buf parameter is the data buffer to write out. -

          -

        • In C, this function returns a non-negative value if successful and - a negative value, otherwise. In FORTRAN, the return value is in the - hdferr parameter: 0 if successful, -1 otherwise. -
        -

        -

      • When an attribute is no longer accessed by a program, - H5Aclose/h5aclose_f must be called - to release the attribute from use. - The C routine returns a non-negative value if successful; - otherwise it returns a negative value. - In FORTRAN, the return value is in the hdferr parameter: - 0 if successful, -1 otherwise. -

        -C: -

        -  herr_t H5Aclose (hid_t attr_id) 
        -
        - -FORTRAN: -
        -  h5aclose_f (attr_id, hdferr)
        -
        -            attr_id  INTEGER(HID_T)
        -            hdferr   INTEGER
        -                     (Possible values: 0 on success and -1 on failure)
        -
        -
        -
          -
        • An H5Aclose/h5aclose_f call is mandatory. -
        -
      - - - -
      -

      File Contents

      -

      -The contents of dset.h5 (dsetf.h5 for FORTRAN) and the -attribute definition are shown below: -

      -Fig. 7.1a   dset.h5 in DDL - -

      -HDF5 "dset.h5" {
      -GROUP "/" {
      -   DATASET "dset" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 4, 6 ) / ( 4, 6 ) }
      -      DATA {
      -         1, 2, 3, 4, 5, 6,
      -         7, 8, 9, 10, 11, 12,
      -         13, 14, 15, 16, 17, 18,
      -         19, 20, 21, 22, 23, 24
      -      }
      -      ATTRIBUTE "attr" {
      -         DATATYPE { H5T_STD_I32BE }
      -         DATASPACE { SIMPLE ( 2 ) / ( 2 ) }
      -         DATA {
      -            100, 200
      -         }
      -      }
      -   }
      -}
      -}
      -
      -Fig. 7.1b   dsetf.h5 in DDL -
      -HDF5 "dsetf.h5" {
      -GROUP "/" {
      -   DATASET "dset" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 6, 4 ) / ( 6, 4 ) }
      -      DATA {
      -         1, 7, 13, 19,
      -         2, 8, 14, 20,
      -         3, 9, 15, 21,
      -         4, 10, 16, 22,
      -         5, 11, 17, 23,
      -         6, 12, 18, 24
      -      }
      -      ATTRIBUTE "attr" {
      -         DATATYPE { H5T_STD_I32BE }
      -         DATASPACE { SIMPLE ( 2 ) / ( 2 ) }
      -         DATA {
      -            100, 200
      -         }
      -      }
      -   }
      -}
      -}
      -
      - - - -
      -

      Attribute Definition in DDL

      -Fig. 7.2   HDF5 Attribute Definition -
      -
      -     <attribute> ::= ATTRIBUTE "<attr_name>" { <datatype>
      -                                               <dataspace>
      -                                               <data>  }
      -
      -
      - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/crtdat.html b/doc/html/Tutor/crtdat.html deleted file mode 100644 index aafb8ea..0000000 --- a/doc/html/Tutor/crtdat.html +++ /dev/null @@ -1,497 +0,0 @@ - -HDF5 Tutorial - Creating a Dataset - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Creating a Dataset -

      - -
      - - -

      Contents:

      - -
      - -

      What is a Dataset?

      -

      -A dataset is a multidimensional array of data elements, together with -supporting metadata. To create a dataset, the application program must specify -the location at which to create the dataset, the dataset name, the datatype -and dataspace of the data array, and the dataset creation property list. -

      -

      Datatypes

      - A datatype is a collection of datatype properties, all of which can - be stored on disk, and which when taken as a whole, provide complete - information for data conversion to or from that datatype. -

      - There are two categories of datatypes in HDF5: atomic and compound - datatypes. An atomic datatype is a datatype which cannot be - decomposed into smaller datatype units at the API level. - These include the integer, float, date and time, string, bitfield, and - opaque datatypes. - A compound datatype is a collection of one or more - atomic datatypes and/or small arrays of such datatypes. -

      - Figure 5.1 shows the HDF5 datatypes. Some of the HDF5 predefined - atomic datatypes are listed in Figures 5.2a and 5.2b. - In this tutorial, we consider only HDF5 predefined integers. - For further information on datatypes, see - The Datatype Interface (H5T) in the - HDF5 User's Guide. -

      - Fig 5.1   HDF5 datatypes -

      -
      -                                          +--  integer
      -                                          +--  floating point
      -                        +---- atomic  ----+--  date and time
      -                        |                 +--  character string
      -       HDF5 datatypes --|                 +--  bitfield
      -                        |                 +--  opaque
      -                        |
      -                        +---- compound
      -
      -
      - - -
      - - Fig. 5.2a   Examples of HDF5 predefined datatypes - - - - - - - - - - - - - - - - - - - - - - - - - - -
      DatatypeDescription
      H5T_STD_I32LEFour-byte, little-endian, signed, two's complement integer
      H5T_STD_U16BETwo-byte, big-endian, unsigned integer
      H5T_IEEE_F32BEFour-byte, big-endian, IEEE floating point
      H5T_IEEE_F64LEEight-byte, little-endian, IEEE floating point
      H5T_C_S1One-byte, null-terminated string of eight-bit characters
      - -
      - - Fig. 5.2b   Examples of HDF5 predefined native datatypes - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Native DatatypeCorresponding C or FORTRAN Type
      C: 
      H5T_NATIVE_INTint
      H5T_NATIVE_FLOATfloat
      H5T_NATIVE_CHARchar
      H5T_NATIVE_DOUBLEdouble
      H5T_NATIVE_LDOUBLElong double
      FORTRAN: 
      H5T_NATIVE_INTinteger
      H5T_NATIVE_REALreal
      H5T_NATIVE_DOUBLEdouble precision
      H5T_NATIVE_CHARcharacter
      - -
      - -

      Datasets and Dataspaces

      - - A dataspace describes the dimensionality of the data array. A dataspace - is either a regular N-dimensional array of data points, called a simple - dataspace, or a more general collection of data points organized in - another manner, called a complex dataspace. Figure 5.3 shows HDF5 dataspaces. - In this tutorial, we only consider simple dataspaces. -

      - Fig 5.3   HDF5 dataspaces -

      -
      -                         +-- simple
      -       HDF5 dataspaces --|
      -                         +-- complex
      -
      -
      - The dimensions of a dataset can be fixed (unchanging), or they may be - unlimited, which means that they are extensible. A dataspace can also - describe a portion of a dataset, making it possible to do partial I/O - operations on selections. - -

      Dataset Creation Property Lists

      - - When creating a dataset, HDF5 allows the user to specify how raw data is - organized and/or compressed on disk. This information is - stored in a dataset creation property list and passed to the dataset - interface. The raw data on disk can be stored contiguously (in the same - linear way that it is organized in memory), partitioned into chunks, - stored externally, etc. In this tutorial, we use the - default dataset creation property list; that is, contiguous storage layout - and no compression are used. For more information about - dataset creation property lists, - see The Dataset Interface (H5D) - in the HDF5 User's Guide. - -

      -In HDF5, datatypes and dataspaces are independent objects which are created -separately from any dataset that they might be attached to. Because of this, -the creation of a dataset requires definition of the datatype and dataspace. -In this tutorial, we use HDF5 predefined datatypes (integer) and consider -only simple dataspaces. Hence, only the creation of dataspace objects is -needed. -

      - -To create an empty dataset (no data written) the following steps need to be -taken: -

        -
      1. Obtain the location identifier where the dataset is to be created. -
      2. Define the dataset characteristics and the dataset creation property list. -
          -
        • Define a datatype. -
        • Define a dataspace. -
        • Specify the dataset creation property list. -
        -
      3. Create the dataset. -
      4. Close the datatype, the dataspace, and the property list if necessary. -
      5. Close the dataset. -
      -To create a simple dataspace, the calling program must contain a -call to create and close the dataspace. For example: -

      -C: -

      -   space_id = H5Screate_simple (rank, dims, maxdims);
      -   status = H5Sclose (space_id );
      -
      -FORTRAN: -
      -   CALL h5screate_simple_f (rank, dims, space_id, hdferr, maxdims=max_dims)
      -        or
      -   CALL h5screate_simple_f (rank, dims, space_id, hdferr)
      -
      -   CALL h5sclose_f (space_id, hdferr)
      -
      - -To create a dataset, the calling program must contain calls to create -and close the dataset. For example: -

      -C: -

      -   dset_id = H5Dcreate (hid_t loc_id, const char *name, hid_t type_id,
      -                          hid_t space_id, hid_t creation_prp);
      -   status = H5Dclose (dset_id);
      -
      -FORTRAN: -
      -   CALL h5dcreate_f (loc_id, name, type_id, space_id, dset_id, &
      -                     hdferr, creation_prp=creat_plist_id)
      -        or
      -   CALL h5dcreate_f (loc_id, name, type_id, space_id, dset_id, hdferr)
      -
      -   CALL h5dclose_f (dset_id, hdferr)
      -
      -If using the pre-defined datatypes in FORTRAN, then a call must -be made to initialize and terminate access to the pre-defined datatypes: -
      -  CALL h5init_types_f (hdferr) 
      -  CALL h5close_types_f (hdferr)
      -
      -h5init_types_f must be called before any HDF5 library -subroutine calls are made; -h5close_types_f must be called after the final HDF5 library -subroutine call. -See the programming example below for an illustration of the use of -these calls. - -

      -

      Programming Example

      - -

      Description

      -The following example shows how to create an empty dataset. -It creates a file called dset.h5 in the C version -(dsetf.h5 in Fortran), defines the dataset dataspace, creates a -dataset which is a 4x6 integer array, and then closes the dataspace, -the dataset, and the file.
      -
      - -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page of this tutorial. - - -
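A minimal C sketch of the example just described (identifier names are illustrative; error checking is omitted):

    #include "hdf5.h"

    int main(void)
    {
        hid_t   file_id, dataspace_id, dataset_id;
        hsize_t dims[2] = {4, 6};              /* 4 x 6 integer array */

        /* Create a new file, truncating any existing file of the same name. */
        file_id = H5Fcreate ("dset.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
                             H5P_DEFAULT);

        /* Define a simple dataspace with fixed dimensions (maxdims = NULL). */
        dataspace_id = H5Screate_simple (2, dims, NULL);

        /* Create the dataset with a standard big-endian integer datatype
           and the default dataset creation property list.                   */
        dataset_id = H5Dcreate (file_id, "/dset", H5T_STD_I32BE,
                                dataspace_id, H5P_DEFAULT);

        /* Close the dataset, the dataspace, and the file.                   */
        H5Dclose (dataset_id);
        H5Sclose (dataspace_id);
        H5Fclose (file_id);
        return 0;
    }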

      Remarks

      -
        -
      • H5Screate_simple/h5screate_simple_f -creates a new simple dataspace and returns a dataspace identifier. -
        -C:
        -  hid_t H5Screate_simple (int rank, const hsize_t * dims, 
        -                          const hsize_t * maxdims)
        -FORTRAN:
        -  h5screate_simple_f (rank, dims, space_id, hdferr, maxdims) 
        -
        -            rank        INTEGER
        -            dims(*)     INTEGER(HSIZE_T)
        -            space_id    INTEGER(HID_T)
        -            hdferr      INTEGER 
        -                        (Valid values: 0 on success and -1 on failure)
        -            maxdims(*)  INTEGER(HSIZE_T), OPTIONAL
        -
        -
          -
        • The rank parameter specifies the rank, i.e., the number of - dimensions, of the dataset. - -
        • The dims parameter specifies the size of the dataset. - -
        • The maxdims parameter specifies the upper limit on the - size of the dataset. - If this parameter is NULL in C (or not specified in FORTRAN), - then the upper limit is the same as the dimension - sizes specified by the dims parameter. -
• The function returns the dataspace identifier in C if successful; - otherwise it returns a negative value. - In FORTRAN, the dataspace identifier - is returned in the space_id parameter. If the call is successful - then a 0 is returned in hdferr; otherwise a -1 is returned. -
        -

        -

      • H5Dcreate/h5dcreate_f creates a dataset -at the specified location and returns a dataset identifier. -
        -C:
        -  hid_t H5Dcreate (hid_t loc_id, const char *name, hid_t type_id, 
        -                   hid_t space_id, hid_t creation_prp) 
        -FORTRAN:
        -  h5dcreate_f (loc_id, name, type_id, space_id, dset_id, & 
        -               hdferr, creation_prp) 
        -
        -            loc_id        INTEGER(HID_T)
        -            name          CHARACTER(LEN=*)
        -            type_id       INTEGER(HID_T)
        -            space_id      INTEGER(HID_T)
        -            dset_id       INTEGER(HID_T)
        -            hdferr        INTEGER 
        -                          (Valid values: 0 on success and -1 on failure)
        -            creation_prp  INTEGER(HID_T), OPTIONAL
        -
        -
          -
        • The loc_id parameter is the location identifier. -

          -

        • The name parameter is the name of the dataset to create. - -

          -

        • The type_id parameter specifies the datatype identifier. - -

          -

        • The space_id parameter is the dataspace identifier. - -

          -

        • The creation_prp parameter specifies the - dataset creation property list. - H5P_DEFAULT in C and H5P_DEFAULT_F in FORTRAN - specify the default dataset creation property list. - This parameter is optional in FORTRAN; if it is omitted, - the default dataset creation property list will be used. -

          -

        • The C function returns the dataset identifier if successful and - a negative value otherwise. The FORTRAN call returns the - dataset identifier in dset_id. If it is successful, then 0 is - returned in hdferr; otherwise a -1 is returned. - -
        -

        -

      • H5Dcreate/h5dcreate_f creates an empty array -and initializes the data to 0. -

        -

      • When a dataset is no longer accessed by a program, -H5Dclose/h5dclose_f must be called to release -the resource used by the dataset. This call is mandatory. -
        -C:
        -    hid_t H5Dclose (hid_t dset_id)
        -FORTRAN:
        -    h5dclose_f (dset_id, hdferr)
        -
        -            dset_id  INTEGER(HID_T)
        -            hdferr   INTEGER 
        -                     (Valid values: 0 on success and -1 on failure)
        -
        -
      - -
      -

      File Contents

      -The contents of the file dset.h5 (dsetf.h5 -for FORTRAN) are shown in Figure 5.4 and Figures 5.5a -and 5.5b. -

      - -
      -Figure 5.4   Contents of dset.h5 ( dsetf.h5) -
      - -
      - - - - - - - - - - -
      Figure 5.5a   dset.h5 in DDL Figure 5.5b   dsetf.h5 in DDL
      -
      -HDF5 "dset.h5" {
      -GROUP "/" {
      -   DATASET "dset" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 4, 6 ) / ( 4, 6 ) }
      -      DATA {
      -         0, 0, 0, 0, 0, 0,
      -         0, 0, 0, 0, 0, 0,
      -         0, 0, 0, 0, 0, 0,
      -         0, 0, 0, 0, 0, 0
      -      }
      -   }
      -}
      -}
      -
      -
      -
            
      -HDF5 "dsetf.h5" {
      -GROUP "/" {
      -   DATASET "dset" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 6, 4 ) / ( 6, 4 ) }
      -      DATA {
      -         0, 0, 0, 0,
      -         0, 0, 0, 0,
      -         0, 0, 0, 0,
      -         0, 0, 0, 0,
      -         0, 0, 0, 0,
      -         0, 0, 0, 0
      -      }
      -   }
      -}
      -}
      -
      -
      - -

      -Note in Figures 5.5a and 5.5b that -H5T_STD_I32BE, a 32-bit Big Endian integer, -is an HDF atomic datatype. - - - -

      Dataset Definition in DDL

      -The following is the simplified DDL dataset definition: -

      - Fig. 5.6   HDF5 Dataset Definition -

      -      <dataset> ::= DATASET "<dataset_name>" { <datatype>
      -                                               <dataspace>
      -                                               <data>
      -                                               <dataset_attribute>* }
      -
      -      <datatype> ::= DATATYPE { <atomic_type> }
      -
      -      <dataspace> ::= DATASPACE { SIMPLE <current_dims> / <max_dims> }
      -
      -      <dataset_attribute> ::= <attribute>
      -
      - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
- - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - diff --git a/doc/html/Tutor/crtfile.html b/doc/html/Tutor/crtfile.html deleted file mode 100644 index fc235c4..0000000 --- a/doc/html/Tutor/crtfile.html +++ /dev/null @@ -1,317 +0,0 @@ - -HDF5 Tutorial - Creating an HDF5 File - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Creating an HDF5 File -

      - -
      - - -

      Contents:

      - -
      - -

      What is an HDF5 file?

      -

      -An HDF5 file is a binary file containing scientific data and supporting -metadata. The primary types of objects stored in an HDF5 file, groups and -datasets, will be discussed in other sections of this tutorial. -

      -To create a file, an application must specify a filename, file -access mode, file creation property list, and file access property list. -

      -

      -

      -The steps to create and close an HDF5 file are as follows: -

        -
      1. Specify the file creation and access property lists, if necessary. -
      2. Create the file. -
      3. Close the file and close the property lists, if necessary. -
      -To create an HDF5 file, the calling program must contain calls to -create and close the file. For example: -

      -C:

      -   file_id = H5Fcreate (filename, access_mode, create_id, access_id);
      -   status = H5Fclose (file_id); 
      -
      -FORTRAN:
      -   CALL h5fcreate_f (filename, access_mode, file_id, hdferr, &
      -            creation_prp=create_id, access_prp=access_id)
      -        or
      -   CALL h5fcreate_f (filename, access_mode, file_id, hdferr)
      -
      -   CALL h5fclose_f (file_id, hdferr)
      -
      -In FORTRAN, the file creation property list, creation_prp, -and file access property list, access_prp, -are optional parameters; -they can be omitted if the default values are to be used. -

      -

      Programming Example

      - -

      Description

      -The following example demonstrates how to create and close an HDF5 file. -It creates a file called file.h5 in the C version, -filef.h5 in FORTRAN, and then closes the file.

      - -

      -

      -NOTE: To download a tar file of all of the examples, including -a Makefile, please go to the References page. - - -
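A minimal C sketch of this example (error checking omitted):

    #include "hdf5.h"

    int main(void)
    {
        hid_t  file_id;
        herr_t status;

        /* Create a new file using the default property lists;
           H5F_ACC_TRUNC overwrites any existing file of the same name. */
        file_id = H5Fcreate ("file.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
                             H5P_DEFAULT);

        /* Terminate access to the file. */
        status = H5Fclose (file_id);
        return 0;
    }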

      Remarks

      -
        -
      • In C: - The include file hdf5.h contains definitions and declarations - and must be included in any program that uses the HDF5 library. -
        In FORTRAN: - The module HDF5 contains definitions and declarations - and must be used in any program that uses the HDF5 library. -

        -

      • H5Fcreate/h5fcreate_f creates - an HDF5 file and returns the file identifier. -
        -C:       
        -  hid_t H5Fcreate (const char *name, unsigned access_mode, hid_t creation_prp, 
        -                   hid_t access_prp) 
        -FORTRAN: 
        -  h5fcreate_f (name, access_mode, file_id, hdferr, creation_prp, access_prp)
        -
        -           name          CHARACTER(LEN=*)
        -           access_flag   INTEGER 
        -                         (Valid values: H5F_ACC_RDWR_F, H5F_ACC_RDONLY_F, 
        -                         H5F_ACC_TRUNC_F, H5F_ACC_EXCL_F, H5F_ACC_DEBUG_F)
        -           file_id       INTEGER(HID_T)
        -           hdferr        INTEGER 
        -                         (Valid values: 0 on success and -1 on failure)
        -           creation_prp  INTEGER(HID_T), OPTIONAL
        -                         (Default value: H5P_DEFAULT_F)
        -           access_prp    INTEGER(HID_T), OPTIONAL
        -                         (Default value: H5P_DEFAULT_F) 
        -         
        -
        -
          -
        • The name parameter specifies the name of the file to be created. -

          -

        • The access_mode parameter specifies the file access mode. - H5F_ACC_TRUNC (H5F_ACC_TRUNC_F in FORTRAN) - will truncate a file if it already exists. -

          -

        • The creation_prp parameter - specifies the file creation property list. - For C, using H5P_DEFAULT indicates that the - default file creation property list is to be used. - This option is optional in FORTRAN; if it is omitted, the default file - creation property list, H5P_DEFAULT_F, is used. -

          -

• The access_prp parameter - specifies the file access property list. - For C, using H5P_DEFAULT indicates that the - default file access property list is to be used. - This option is optional in FORTRAN; if it is omitted, the default file - access property list, H5P_DEFAULT_F, is used. -

          -

        • In C, this function returns the file identifier if successful and - a negative value otherwise. - In FORTRAN, the file identifier is returned in the - file_id parameter. If the call is successful, 0 (zero) is - passed back in the hdferr parameter. Otherwise, hdferr - will have a value of -1. - -
        -

        -

      • When a file is no longer accessed by a program, - H5Fclose/h5fclose_f - must be called to release the resources used by the file. This call - is mandatory. -
        -C:
        -    herr_t H5Fclose (hid_t file_id) 
        -
        -FORTRAN:
        -    h5fclose_f(file_id, hdferr)
        -
        -

        -

      • The root group is automatically created when a file is created. - Every file has a root group and the path name of the root group is - always /. -
      -
      -

      File Contents

      -The HDF team has developed tools for examining the contents of HDF5 files. -The tool used in this tutorial is the HDF5 dumper, h5dump, -which displays the file contents in human-readable form. -The output of h5dump is an ASCII display formatted according -to the HDF5 DDL grammar. -This grammar is defined, using Backus-Naur Form, in the -
      DDL in BNF for HDF5. -

      -To view the file contents, type: -

      -   h5dump <filename> 
      -
      - -Figure 4.1 describes the file contents of file.h5 (filef.h5) -using a directed graph. -The directed graphs in this tutorial use an oval to represent an HDF5 group -and a rectangle to represent an HDF5 dataset (none in this example). -Arrows indicate the inclusion direction of the contents (none in this example). - -

      -Fig. 4.1   Contents of file.h5 (filef.h5) -

      -
      -
      - -Figure 4.2 is the text description of file.h5, as generated by -h5dump. The HDF5 file called file.h5 contains -a group called /, or the root group. -(The file called filef.h5, -created by the FORTRAN version of the example, has the same output except -that the filename shown is filef.h5.) -

      - Fig. 4.2   file.h5 in DDL -

      -
      -         HDF5 "file.h5" {
      -         GROUP "/" {
      -         }
      -         }
      -
      -
      - - -

      File Definition in DDL

      - -Figure 4.3 is the simplified DDL file definition for creating an HDF5 file. -For simplicity, a simplified DDL is used in this tutorial. A complete and -more rigorous DDL can be found in the -
      DDL in BNF for HDF5, a section of the -HDF5 User's Guide. -

      - Fig. 4.3   HDF5 File Definition -

      - The following symbol definitions are used in the DDL: -

      -
      -        ::=               defined as
      -        <tname>           a token with the name tname
      -        <a> | <b>         one of <a> or <b>
      -        <a>*              zero or more occurrences of <a>
      -
      - The simplified DDL for file definition is as follows: -
      -        <file> ::= HDF5 "<file_name>" { <root_group> }
      -
      -        <root_group> ::= GROUP "/" { <group_attribute>* <group_member>* }
      -
      -        <group_attribute> ::= <attribute>
      -
      -        <group_member> ::= <group> | <dataset>
      -
      - - -


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      - - - - - diff --git a/doc/html/Tutor/crtgrp.html b/doc/html/Tutor/crtgrp.html deleted file mode 100644 index 028553d..0000000 --- a/doc/html/Tutor/crtgrp.html +++ /dev/null @@ -1,202 +0,0 @@ - -HDF5 Tutorial - Creating a Group - - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Creating a Group -

      - -
      - - -

      Contents:

      - -
      - -

      What is a Group?

      -

      -An HDF5 group is a structure containing zero or more HDF5 objects. The two -primary HDF5 objects are groups and datasets. To create a group, the calling -program must: -

        -
      1. Obtain the location identifier where the group is to be created. -
      2. Create the group. -
      3. Close the group. -
      -To create a group, the calling program must call -H5Gcreate/h5gcreate_f. -To close the group, H5Gclose/h5gclose_f -must be called. For example: -

      -C: -

      -  group_id = H5Gcreate (loc_id, name, size_hint);
      -  status = H5Gclose (group_id);
      -
      -FORTRAN: -
      -  CALL h5gcreate_f (loc_id, name, group_id, error, size_hint=size)
      -       or
      -  CALL h5gcreate_f (loc_id, name, group_id, error)
      -
      -
      -  CALL h5gclose_f (group_id, error)
      -
      - - -

      -

      Programming Example

      -
      -

      Description

      -The following example shows how to create and close a group. It creates a file -called group.h5 (groupf.h5 for FORTRAN), -creates a group called MyGroup in the root group, -and then closes the group and file.
      -
      -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. - - - -
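A minimal C sketch of this example (identifier names are illustrative; error checking omitted):

    #include "hdf5.h"

    int main(void)
    {
        hid_t file_id, group_id;

        /* Create a new file using default properties. */
        file_id = H5Fcreate ("group.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
                             H5P_DEFAULT);

        /* Create a group named "MyGroup" in the root group of the file.
           A size_hint of 0 lets the library choose a default heap size. */
        group_id = H5Gcreate (file_id, "/MyGroup", 0);

        /* Close the group and the file. */
        H5Gclose (group_id);
        H5Fclose (file_id);
        return 0;
    }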

      Remarks

      -
        -
      • H5Gcreate/h5gcreate_f creates - a new empty group, named MyGroup and located in the - root group, and returns a group identifier. -

        -C: -

        -  hid_t H5Gcreate (hid_t loc_id, const char *name, size_t size_hint) 
        -
        -FORTRAN: -
        -  h5gcreate_f (loc_id, name, group_id, hdferr, size_hint)
        -
        -           loc_id     INTEGER(HID_T)
        -           name       CHARACTER(LEN=*)
        -           group_id   INTEGER(HID_T)
        -           hdferr     INTEGER
        -                      (Possible values: 0 on success and -1 on failure)
        -           size_hint  INTEGER(SIZE_T), OPTIONAL
        -                      (Default value: OBJECT_NAMELEN_DEFAULT_F)
        -         
        -
        -
          -
        • The loc_id parameter specifies the location at which - to create the group. -

          -

        • The name parameter specifies the name of the group to be created. -

          -

        • The size_hint parameter specifies how much file space to - reserve to store the - names that will appear in the group. If a non-positive value is supplied, - then a default size is used. Passing a value of zero is usually adequate - since the library is able to dynamically resize the name heap. -

          -

        • In FORTRAN, the return value for the routine is passed in - hdferr: 0 if successful, -1 otherwise. The group identifier - is passed back in group_id. In C, the function returns a valid - group identifier if successful and a negative value otherwise. - -
        -

        -

      • H5Gclose/h5gclose_f closes the group. - This call is mandatory. -

        -C: -

        -  herr_t H5Gclose (hid_t group_id) 
        -
        -FORTRAN: -
        -  h5gclose_f (group_id, hdferr)
        -
        -           group_id  INTEGER(HID_T)
        -           hdferr    INTEGER
        -                     (Possible values: 0 on success and -1 on failure)
        -         
        -
        -
      - -
      -

      File Contents

      -The contents of group.h5 and the -definition of the group are shown below. (The FORTRAN program -creates the HDF5 file groupf.h5 and the resulting DDL shows -groupf.h5 in the first line.) -

      - - - - - - - - - - - - -
      Fig. 8.1   The Contents of group.h5. -   - Fig. 8.2   group.h5 in DDL
        -
             
      -HDF5 "group.h5" {
      -GROUP "/" {
      -   GROUP "MyGroup" {
      -   }
      -}
      -}
      -
      -
      - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/crtgrpar.html b/doc/html/Tutor/crtgrpar.html deleted file mode 100644 index dfbf5ad..0000000 --- a/doc/html/Tutor/crtgrpar.html +++ /dev/null @@ -1,229 +0,0 @@ - -HDF5 Tutorial - Creating Groups using Absolute and Relative Names - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Creating Groups Using -Absolute and Relative Names -

      - -
      - - -

      Contents:

      - -
      - -

      Absolute vs. Relative Names

      -

      -Recall that to create an HDF5 object, we have to specify the location where the -object is to be created. This location is determined by the identifier of an HDF5 -object and the name of the object to be created. The name of the created -object can be either an absolute name or a name relative to the specified -identifier. -In the previous example, we used the file identifier and the absolute name -/MyGroup to create a group. -

      -In this section, we discuss HDF5 names and show how to use absolute and -relative names. - -

      Names

- -HDF5 object names are a slash-separated list of components. There are few -restrictions on names: component names may be any length except zero and may -contain any character except slash (/) and the null terminator. -A full name -may be composed of any number of component names separated by slashes, with any -of the component names being the special name . (a dot or period). -A name which begins with a slash is an absolute name which is accessed -beginning with the root group of the file; -all other names are relative names and the named object is -accessed beginning with the specified group. -Multiple consecutive slashes in a full name are treated as single slashes -and trailing slashes are not significant. A special case is the name / (or -equivalent) which refers to the root group. -

      -Functions which operate on names generally take a location identifier, which -can be either a file identifier or a group identifier, and perform the lookup -with respect to that location. -Several possibilities are described in the following table: - -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Location Type Object NameDescription
      File identifier -
      /foo/bar
      -
      The object bar in group foo - in the root group.
      Group identifier -
      /foo/bar
      -
      The object bar in group foo in the - root group of the file containing the specified group. - In other words, the group identifier's only purpose is to - specify a file.
      File identifier -
      /
      -
      The root group of the specified file.
      Group identifier -
      /
      -
      The root group of the file containing the specified group.
      Group identifier -
      foo/bar
      -
      The object bar in group foo in - the specified group.
      File identifier -
      .
      -
      The root group of the file.
      Group identifier -
      .
      -
      The specified group.
      Other identifier -
      .
      -
      The specified object.
      -
      - - -

      -

      Programming Example

      -
      -

      Description

      -The following example code shows how to create groups using absolute -and relative names. It creates three groups: the first two groups are -created using the file identifier and the group absolute names while the -third group is created using a group identifier and a name relative -to the specified group.
      -
      - -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. - - - -
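A minimal C sketch of this example, with the three H5Gcreate calls corresponding to the three cases discussed in the Remarks below (identifier names are illustrative; error checking omitted):

    #include "hdf5.h"

    int main(void)
    {
        hid_t file_id, group1_id, group2_id, group3_id;

        file_id = H5Fcreate ("groups.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
                             H5P_DEFAULT);

        /* Create "MyGroup" in the root group using an absolute name.    */
        group1_id = H5Gcreate (file_id, "/MyGroup", 0);

        /* Create "Group_A" in "MyGroup" using an absolute name.         */
        group2_id = H5Gcreate (file_id, "/MyGroup/Group_A", 0);

        /* Create "Group_B" in "MyGroup" using a relative name and the
           group identifier obtained above.                               */
        group3_id = H5Gcreate (group1_id, "Group_B", 0);

        /* Close all groups and the file. */
        H5Gclose (group3_id);
        H5Gclose (group2_id);
        H5Gclose (group1_id);
        H5Fclose (file_id);
        return 0;
    }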

      Remarks

      -
        -
      • H5Gcreate/h5gcreate_f creates a group at the - location specified by a location identifier and a name. - The location identifier can be a file identifier or a group identifier - and the name can be relative or absolute. -

        -

      • The first H5Gcreate/h5gcreate_f creates the group - MyGroup in the root group of the specified file. -

        -

      • The second H5Gcreate/h5gcreate_f creates the group - Group_A in the group MyGroup in the root group - of the specified file. Note that the parent group (MyGroup) - already exists. -

        -

      • The third H5Gcreate/h5gcreate_f creates the group - Group_B in the specified group. -
      -
      -

      File Contents

      -The file contents are shown below: -

      -Fig. 9.1   The Contents of groups.h5 - (groupsf.h5 for FORTRAN) -

      - -

      - - - Fig. 9.2   groups.h5 in DDL - (for FORTRAN, the name in the first line is groupsf.h5) -
      -
      -      HDF5 "groups.h5" {
      -      GROUP "/" {
      -         GROUP "MyGroup" {
      -            GROUP "Group_A" {
      -            }
      -            GROUP "Group_B" {
      -            }
      -         }
      -      }
      -      }
      -
      -
      - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/crtgrpd.html b/doc/html/Tutor/crtgrpd.html deleted file mode 100644 index 97ae78c..0000000 --- a/doc/html/Tutor/crtgrpd.html +++ /dev/null @@ -1,163 +0,0 @@ - -HDF5 Tutorial - Creating Datasets in Groups - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Creating Datasets in Groups -

      - -
      - - -

      Contents:

      - -
      - -

      Creating datasets in groups

      -We have shown how to create groups, datasets, and attributes. -In this section, we show how to create datasets in groups. -Recall that H5Dcreate/h5dcreate_f -creates a dataset at the location specified by a location identifier and -a name. Similar to H5Gcreate/h5gcreate_f, -the location identifier can be a -file identifier or a group identifier and the name can be -relative or absolute. The location identifier and the name together determine -the location where the dataset is to be created. If the location identifier -and name refer to a group, then the dataset is created in that group. - - -

      Programming Example


      Description

      This example shows how to create a dataset in a particular group. It opens the file created in the previous example and creates two datasets, one using an absolute name and one using a name relative to a group identifier.

      NOTE: To download a tar file of the examples, including a Makefile, please go to the References page.
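      A minimal C sketch of the two H5Dcreate calls this example describes, mirroring the Fortran grpdsetexample.f90 retained later in this patch and assuming the groups.h5 layout created above (HDF5 1.6-era API; error checking omitted):

      #include "hdf5.h"
      #define FILE "groups.h5"

      int main(void) {
          hid_t file_id, group_id, dataset_id, dataspace_id;
          hsize_t dims[2];
          int dset1_data[3][3], dset2_data[2][10];
          int i, j;

          /* Fill the write buffers (same values as the DDL listings below). */
          for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) dset1_data[i][j] = j + 1;
          for (i = 0; i < 2; i++) for (j = 0; j < 10; j++) dset2_data[i][j] = j + 1;

          /* Open the file created in the previous example. */
          file_id = H5Fopen(FILE, H5F_ACC_RDWR, H5P_DEFAULT);

          /* "dset1" in "MyGroup": file identifier plus an absolute name. */
          dims[0] = 3; dims[1] = 3;
          dataspace_id = H5Screate_simple(2, dims, NULL);
          dataset_id = H5Dcreate(file_id, "/MyGroup/dset1", H5T_STD_I32BE,
                                 dataspace_id, H5P_DEFAULT);
          H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset1_data);
          H5Dclose(dataset_id);
          H5Sclose(dataspace_id);

          /* "dset2" in "Group_A": group identifier plus a relative name. */
          group_id = H5Gopen(file_id, "/MyGroup/Group_A");
          dims[0] = 2; dims[1] = 10;
          dataspace_id = H5Screate_simple(2, dims, NULL);
          dataset_id = H5Dcreate(group_id, "dset2", H5T_STD_I32BE,
                                 dataspace_id, H5P_DEFAULT);
          H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, dset2_data);
          H5Dclose(dataset_id);
          H5Sclose(dataspace_id);
          H5Gclose(group_id);

          H5Fclose(file_id);
          return 0;
      }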

      File Contents

      Fig. 10.1   The Contents of groups.h5 (groupsf.h5 for FORTRAN)

      Fig. 10.2a   groups.h5 in DDL

      HDF5 "groups.h5" {
      GROUP "/" {
         GROUP "MyGroup" {
            GROUP "Group_A" {
               DATASET "dset2" {
                  DATATYPE { H5T_STD_I32BE }
                  DATASPACE { SIMPLE ( 2, 10 ) / ( 2, 10 ) }
                  DATA {
                     1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                     1, 2, 3, 4, 5, 6, 7, 8, 9, 10
                  }
               }
            }
            GROUP "Group_B" {
            }
            DATASET "dset1" {
               DATATYPE { H5T_STD_I32BE }
               DATASPACE { SIMPLE ( 3, 3 ) / ( 3, 3 ) }
               DATA {
                  1, 2, 3,
                  1, 2, 3,
                  1, 2, 3
               }
            }
         }
      }
      }

      Fig. 10.2b   groupsf.h5 in DDL

      HDF5 "groupsf.h5" {
      GROUP "/" {
         GROUP "MyGroup" {
            GROUP "Group_A" {
               DATASET "dset2" {
                  DATATYPE { H5T_STD_I32BE }
                  DATASPACE { SIMPLE ( 10, 2 ) / ( 10, 2 ) }
                  DATA {
                     1, 1,
                     2, 2,
                     3, 3,
                     4, 4,
                     5, 5,
                     6, 6,
                     7, 7,
                     8, 8,
                     9, 9,
                     10, 10
                  }
               }
            }
            GROUP "Group_B" {
            }
            DATASET "dset1" {
               DATATYPE { H5T_STD_I32BE }
               DATASPACE { SIMPLE ( 3, 3 ) / ( 3, 3 ) }
               DATA {
                  1, 1, 1,
                  2, 2, 2,
                  3, 3, 3
               }
            }
         }
      }
      }


      - - - - - - - diff --git a/doc/html/Tutor/examples/Makefile.am b/doc/html/Tutor/examples/Makefile.am deleted file mode 100644 index d28abfc..0000000 --- a/doc/html/Tutor/examples/Makefile.am +++ /dev/null @@ -1,38 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir = $(docdir)/hdf5/Tutor/examples - -# Public doc files (to be installed)... -localdoc_DATA=h5_compound.c h5_copy.c h5_crtatt.c h5_crtdat.c h5_crtfile.c \ - h5_crtgrp.c h5_crtgrpar.c h5_crtgrpd.c h5_extend.c h5_hyperslab.c \ - h5_iterate.c h5_mount.c h5_rdwt.c h5_read.c h5_ref2objr.c \ - h5_ref2objw.c h5_ref2regr.c h5_ref2regw.c h5_reference.c - -nobase_localdoc_DATA=java/Compound.java java/Copy.java java/CreateAttribute.java \ - java/CreateDataset.java java/CreateFile.java \ - java/CreateFileInput.java java/CreateGroup.java \ - java/CreateGroupAR.java java/CreateGroupDataset.java \ - java/DatasetRdWt.java java/HyperSlab.java java/Makefile \ - java/Makefile.in java/README java/readme.html \ - java/runCompound.sh java/runCompound.sh.in java/runCopy.sh \ - java/runCopy.sh.in java/runCreateAttribute.sh \ - java/runCreateAttribute.sh.in java/runCreateDataset.sh \ - java/runCreateDataset.sh.in java/runCreateFile.sh \ - java/runCreateFile.sh.in java/runCreateFileInput.sh \ - java/runCreateFileInput.sh.in java/runCreateGroup.sh \ - java/runCreateGroup.sh.in java/runCreateGroupAR.sh \ - java/runCreateGroupAR.sh.in java/runCreateGroupDataset.sh \ - java/runCreateGroupDataset.sh.in java/runDatasetRdWt.sh \ - java/runDatasetRdWt.sh.in java/runHyperSlab.sh \ - java/runHyperSlab.sh.in diff --git a/doc/html/Tutor/examples/Makefile.in b/doc/html/Tutor/examples/Makefile.in deleted file mode 100644 index 98ec3a0..0000000 --- a/doc/html/Tutor/examples/Makefile.in +++ /dev/null @@ -1,530 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../../.. 
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/Tutor/examples -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" \ - "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -nobase_localdocDATA_INSTALL = $(install_sh_DATA) -DATA = $(localdoc_DATA) $(nobase_localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. -ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make 
sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. 
-SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/Tutor/examples - -# Public doc files (to be installed)... 
-localdoc_DATA = h5_compound.c h5_copy.c h5_crtatt.c h5_crtdat.c h5_crtfile.c \ - h5_crtgrp.c h5_crtgrpar.c h5_crtgrpd.c h5_extend.c h5_hyperslab.c \ - h5_iterate.c h5_mount.c h5_rdwt.c h5_read.c h5_ref2objr.c \ - h5_ref2objw.c h5_ref2regr.c h5_ref2regw.c h5_reference.c - -nobase_localdoc_DATA = java/Compound.java java/Copy.java java/CreateAttribute.java \ - java/CreateDataset.java java/CreateFile.java \ - java/CreateFileInput.java java/CreateGroup.java \ - java/CreateGroupAR.java java/CreateGroupDataset.java \ - java/DatasetRdWt.java java/HyperSlab.java java/Makefile \ - java/Makefile.in java/README java/readme.html \ - java/runCompound.sh java/runCompound.sh.in java/runCopy.sh \ - java/runCopy.sh.in java/runCreateAttribute.sh \ - java/runCreateAttribute.sh.in java/runCreateDataset.sh \ - java/runCreateDataset.sh.in java/runCreateFile.sh \ - java/runCreateFile.sh.in java/runCreateFileInput.sh \ - java/runCreateFileInput.sh.in java/runCreateGroup.sh \ - java/runCreateGroup.sh.in java/runCreateGroupAR.sh \ - java/runCreateGroupAR.sh.in java/runCreateGroupDataset.sh \ - java/runCreateGroupDataset.sh.in java/runDatasetRdWt.sh \ - java/runDatasetRdWt.sh.in java/runHyperSlab.sh \ - java/runHyperSlab.sh.in - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/Tutor/examples/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/Tutor/examples/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -install-nobase_localdocDATA: $(nobase_localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @$(am__vpath_adj_setup) \ - list='$(nobase_localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - $(am__vpath_adj) \ - echo " $(nobase_localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(nobase_localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-nobase_localdocDATA: - @$(NORMAL_UNINSTALL) - @$(am__vpath_adj_setup) \ - list='$(nobase_localdoc_DATA)'; for p in $$list; do \ - $(am__vpath_adj) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)" "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - 
-install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA install-nobase_localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA \ - uninstall-nobase_localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-nobase_localdocDATA install-strip installcheck \ - installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic \ - mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ - uninstall-info-am uninstall-localdocDATA \ - uninstall-nobase_localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/Tutor/examples/attrexample.f90 b/doc/html/Tutor/examples/attrexample.f90 deleted file mode 100644 index 9198eb8..0000000 --- a/doc/html/Tutor/examples/attrexample.f90 +++ /dev/null @@ -1,87 +0,0 @@ -! This example shows how to create and write a dataset attribute. -! It opens the existing file 'dset.h5', obtains the identifier of -! the dataset "/dset", defines attribute's dataspace, -! creates dataset attribute, writes the attribute, and then closes -! the attribute's dataspace, attribute, dataset, and file. - - PROGRAM ATTREXAMPLE - - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=8), PARAMETER :: filename = "dsetf.h5" ! File name - CHARACTER(LEN=4), PARAMETER :: dsetname = "dset" ! Dataset name - CHARACTER(LEN=4), PARAMETER :: aname = "attr" ! Attribute name - - INTEGER(HID_T) :: file_id ! 
File identifier - INTEGER(HID_T) :: dset_id ! Dataset identifier - INTEGER(HID_T) :: attr_id ! Attribute identifier - INTEGER(HID_T) :: aspace_id ! Attribute Dataspace identifier - - INTEGER(HSIZE_T), DIMENSION(1) :: adims = (/2/) ! Attribute dimension - INTEGER, DIMENSION(2) :: attr_data = (/100,200/)! Attribute data - INTEGER :: arank = 1 ! Attribure rank - - INTEGER :: error ! Error flag - - - ! - ! Initialize FORTRAN predefined datatypes. - ! - CALL h5open_f(error) - - ! - ! Open an existing file. - ! - CALL h5fopen_f (filename, H5F_ACC_RDWR_F, file_id, error) - - ! - ! Open an existing dataset. - ! - CALL h5dopen_f(file_id, dsetname, dset_id, error) - - ! - ! Create the data space for the attribute. - ! - CALL h5screate_simple_f(arank, adims, aspace_id, error) - - ! - ! Create dataset attribute. - ! - CALL h5acreate_f(dset_id, aname, H5T_NATIVE_INTEGER,aspace_id, & - attr_id, error) - - ! - ! Write the attribute data. - ! - CALL h5awrite_f(attr_id, H5T_NATIVE_INTEGER, attr_data, error) - - ! - ! Close the attribute. - ! - CALL h5aclose_f(attr_id, error) - - ! - ! Terminate access to the data space. - ! - CALL h5sclose_f(aspace_id, error) - - ! - ! End access to the dataset and release resources used by it. - ! - CALL h5dclose_f(dset_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Close FORTRAN predefined datatypes. - ! - CALL h5close_f(error) - - END PROGRAM ATTREXAMPLE - diff --git a/doc/html/Tutor/examples/chunk.f90 b/doc/html/Tutor/examples/chunk.f90 deleted file mode 100644 index 2810b5c..0000000 --- a/doc/html/Tutor/examples/chunk.f90 +++ /dev/null @@ -1,310 +0,0 @@ -! -!This example shows how to work with extendible datasets. -!It creates a 3 x 3 extendible dataset, write to that dataset, -!extend the dataset to 10x3, and write to the dataset again -! - - - - - PROGRAM CHUNKEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - ! - !the dataset is stored in file "extf.h5" - ! - CHARACTER(LEN=7), PARAMETER :: filename = "extf.h5" - - ! - !dataset name is "ExtendibleArray" - ! - CHARACTER(LEN=15), PARAMETER :: dsetname = "ExtendibleArray" - - ! - !dataset rank is 2 - ! - INTEGER :: RANK = 2 - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: dset_id ! Dataset identifier - INTEGER(HID_T) :: dataspace ! Dataspace identifier - INTEGER(HID_T) :: filespace ! Dataspace identifier - INTEGER(HID_T) :: memspace ! memspace identifier - INTEGER(HID_T) :: cparms !dataset creatation property identifier - - ! - !dataset dimensions at creation time - ! - INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/3,3/) - - ! - !data1 dimensions - ! - INTEGER(HSIZE_T), DIMENSION(2) :: dims1 = (/3,3/) - - ! - !data2 dimensions - ! - INTEGER(HSIZE_T), DIMENSION(2) :: dims2 = (/7,1/) - - ! - !Maximum dimensions - ! - INTEGER(HSIZE_T), DIMENSION(2) :: maxdims - - ! - !data1 dimensions - ! - INTEGER, DIMENSION(3,3) :: data1 - - ! - !data2 dimensions - ! - INTEGER, DIMENSION(7,1) :: data2 - - ! - !Size of the hyperslab in the file - ! - INTEGER(HSIZE_T), DIMENSION(2) :: size - - ! - !hyperslab offset in the file - ! - INTEGER(HSIZE_T), DIMENSION(2) :: offset - - ! - !general purpose integer - ! - INTEGER :: i, j, k - - ! - !flag to check operation success - ! - INTEGER :: error, error_n - - ! - !Variables used in reading data back - ! 
- INTEGER(HSIZE_T), DIMENSION(2) :: chunk_dims = (/5,2/) - INTEGER(HSIZE_T), DIMENSION(2) :: chunk_dimsr - INTEGER(HSIZE_T), DIMENSION(2) :: dimsr, maxdimsr - INTEGER, DIMENSION(10,3) :: data_out - INTEGER :: rankr, rank_chunk - - ! - !data initialization - ! - do i = 1, 3 - do j = 1, 3 - data1(i,j) = 1 - end do - end do - - do j = 1, 7 - data2(j,1) = 2 - end do - - - ! - !Initialize FORTRAN predifined datatypes - ! - CALL h5open_f(error) - - ! - !Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - - ! - !Create the data space with unlimited dimensions. - ! - maxdims = (/H5S_UNLIMITED_f, H5S_UNLIMITED_f/) - - CALL h5screate_simple_f(RANK, dims, dataspace, error, maxdims) - - ! - !Modify dataset creation properties, i.e. enable chunking - ! - CALL h5pcreate_f(H5P_DATASET_CREATE_F, cparms, error) - - CALL h5pset_chunk_f(cparms, RANK, chunk_dims, error) - - ! - !Create a new dataset within the file using cparms creation properties. - ! - !CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INT_F, dataspace, & - CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, dataspace, & - dset_id, error, cparms) - - ! - !Extend the dataset. This call assures that dataset is 3 x 3. - ! - size(1) = 3 - size(2) = 3 - CALL h5dextend_f(dset_id, size, error) - - - ! - !Select a hyperslab. - ! - CALL h5dget_space_f(dset_id, filespace, error) - offset(1) = 0; - offset(2) = 0; - CALL h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, & - offset, dims1, error) - - ! - !Write the data to the hyperslab. - ! - !CALL H5Dwrite_f(dset_id, H5T_NATIVE_INT_F, data1, error, & - CALL H5Dwrite_f(dset_id, H5T_NATIVE_INTEGER, data1, error, & - filespace, dataspace) - - ! - !Extend the dataset. Dataset becomes 10 x 3. - ! - dims(1) = dims1(1) + dims2(1); - size(1) = dims(1); - size(2) = dims(2); - CALL h5dextend_f(dset_id, size, error) - - ! - !Select a hyperslab. - ! - CALL h5dget_space_f(dset_id, filespace, error) - offset(1) = 3; - offset(2) = 0; - CALL h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, & - offset, dims2, error) - - ! - !create memory dataspace. - ! - CALL h5screate_simple_f(RANK, dims2, memspace, error) - - ! - !Write the data to the hyperslab. - ! - !CALL H5Dwrite_f(dset_id, H5T_NATIVE_INT_F, data2, error, & - CALL H5Dwrite_f(dset_id, H5T_NATIVE_INTEGER, data2, error, & - mem_space_id=memspace, file_space_id=filespace) - - ! - !Close the dataspace for the dataset. - ! - CALL h5sclose_f(dataspace, error) - CALL h5sclose_f(filespace, error) - - ! - !Close the memoryspace. - ! - CALL h5sclose_f(memspace, error) - - ! - !Close the dataset. - ! - CALL h5dclose_f(dset_id, error) - - ! - !Close the property list. - ! - CALL h5pclose_f(cparms, error) - - ! - !Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - !read the data back - ! - !Open the file. - ! - CALL h5fopen_f (filename, H5F_ACC_RDONLY_F, file_id, error) - - ! - !Open the dataset. - ! - CALL h5dopen_f(file_id, dsetname, dset_id, error) - - ! - !Get dataset's dataspace handle. - ! - CALL h5dget_space_f(dset_id, dataspace, error) - - ! - !Get dataspace's rank. - ! - CALL h5sget_simple_extent_ndims_f(dataspace, rankr, error) - - - ! - !Get dataspace's dimensinons. - ! - CALL h5sget_simple_extent_dims_f(dataspace, dimsr, maxdimsr, error) - - - ! - !Get creation property list. - ! - CALL h5dget_create_plist_f(dset_id, cparms, error) - - ! - !Get chunk dimensions. - ! - CALL h5pget_chunk_f(cparms, 2, chunk_dimsr, error) - - ! - !create memory dataspace. - ! 
- CALL h5screate_simple_f(rankr, dimsr, memspace, error) - - ! - !Read data - ! - !CALL H5Dread_f(dset_id, H5T_NATIVE_INT_F, data_out, error, & - CALL H5Dread_f(dset_id, H5T_NATIVE_INTEGER, data_out, error, & - memspace, dataspace) - - ! - !Print data - ! - do i = 1, dimsr(1) - print *, (data_out(i,j), j = 1,dimsr(2)) - end do - - ! - !Close the dataspace for the dataset. - ! - CALL h5sclose_f(dataspace, error) - - ! - !Close the memoryspace. - ! - CALL h5sclose_f(memspace, error) - - ! - !Close the dataset. - ! - CALL h5dclose_f(dset_id, error) - - ! - !Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - !Close the property list. - ! - CALL h5pclose_f(cparms, error) - - ! - ! Close FORTRAN predefined datatypes. - ! - CALL h5close_f(error) - - END PROGRAM CHUNKEXAMPLE diff --git a/doc/html/Tutor/examples/compound.f90 b/doc/html/Tutor/examples/compound.f90 deleted file mode 100644 index a2bd6b0..0000000 --- a/doc/html/Tutor/examples/compound.f90 +++ /dev/null @@ -1,215 +0,0 @@ -! -! This program creates a dataset that is one dimensional array of -! structures { -! character*2 -! integer -! double precision -! real -! } -! Data is written and read back by fields. -! - - PROGRAM COMPOUNDEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=11), PARAMETER :: filename = "compound.h5" ! File name - CHARACTER(LEN=8), PARAMETER :: dsetname = "Compound" ! Dataset name - INTEGER, PARAMETER :: dimsize = 6 ! Size of the dataset - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: dset_id ! Dataset identifier - INTEGER(HID_T) :: dspace_id ! Dataspace identifier - INTEGER(HID_T) :: dtype_id ! Compound datatype identifier - INTEGER(HID_T) :: dt1_id ! Memory datatype identifier (for character field) - INTEGER(HID_T) :: dt2_id ! Memory datatype identifier (for integer field) - INTEGER(HID_T) :: dt3_id ! Memory datatype identifier (for double precision field) - INTEGER(HID_T) :: dt4_id ! Memory datatype identifier (for real field) - INTEGER(HID_T) :: dt5_id ! Memory datatype identifier - INTEGER(HID_T) :: plist_id ! Dataset trasfer property - INTEGER(SIZE_T) :: typesize - - - INTEGER(HSIZE_T), DIMENSION(1) :: dims = (/dimsize/) ! Dataset dimensions - INTEGER :: rank = 1 ! Dataset rank - - INTEGER :: error ! Error flag - INTEGER(SIZE_T) :: type_size ! Size of the datatype - INTEGER(SIZE_T) :: type_sizec ! Size of the character datatype - INTEGER(SIZE_T) :: type_sizei ! Size of the integer datatype - INTEGER(SIZE_T) :: type_sized ! Size of the double precision datatype - INTEGER(SIZE_T) :: type_sizer ! Size of the real datatype - INTEGER(SIZE_T) :: offset ! Member's offset - CHARACTER*2, DIMENSION(dimsize) :: char_member - CHARACTER*2, DIMENSION(dimsize) :: char_member_out ! Buffer to read data out - INTEGER, DIMENSION(dimsize) :: int_member - DOUBLE PRECISION, DIMENSION(dimsize) :: double_member - REAL, DIMENSION(dimsize) :: real_member - INTEGER :: i - ! - ! Initialize data buffer. - ! - do i = 1, dimsize - char_member(i)(1:1) = char(65+i) - char_member(i)(2:2) = char(65+i) - char_member_out(i)(1:1) = char(65) - char_member_out(i)(2:2) = char(65) - int_member(i) = i - double_member(i) = 2.* i - real_member(i) = 3. * i - enddo - - ! - ! Initialize FORTRAN interface. - ! - CALL h5open_f(error) - ! - ! Set dataset transfer property to preserve partially initialized fields - ! during write/read to/from dataset with compound datatype. - ! 
- CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error) - CALL h5pset_preserve_f(plist_id, 1, error) - - ! - ! Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - ! - ! Create the dataspace. - ! - CALL h5screate_simple_f(rank, dims, dspace_id, error) - ! - ! Create compound datatype. - ! - ! First calculate total size by calculating sizes of each member - ! - CALL h5tcopy_f(H5T_NATIVE_CHARACTER, dt5_id, error) - typesize = 2 - CALL h5tset_size_f(dt5_id, typesize, error) - CALL h5tget_size_f(dt5_id, type_sizec, error) - CALL h5tget_size_f(H5T_NATIVE_INTEGER, type_sizei, error) - CALL h5tget_size_f(H5T_NATIVE_DOUBLE, type_sized, error) - CALL h5tget_size_f(H5T_NATIVE_REAL, type_sizer, error) - type_size = type_sizec + type_sizei + type_sized + type_sizer - CALL h5tcreate_f(H5T_COMPOUND_F, type_size, dtype_id, error) - ! - ! Insert memebers - ! - ! CHARACTER*2 memeber - ! - offset = 0 - CALL h5tinsert_f(dtype_id, "char_field", offset, dt5_id, error) - ! - ! INTEGER member - ! - offset = offset + type_sizec ! Offset of the second memeber is 2 - CALL h5tinsert_f(dtype_id, "integer_field", offset, H5T_NATIVE_INTEGER, error) - ! - ! DOUBLE PRECISION member - ! - offset = offset + type_sizei ! Offset of the third memeber is 6 - CALL h5tinsert_f(dtype_id, "double_field", offset, H5T_NATIVE_DOUBLE, error) - ! - ! REAL member - ! - offset = offset + type_sized ! Offset of the last member is 14 - CALL h5tinsert_f(dtype_id, "real_field", offset, H5T_NATIVE_REAL, error) - - ! - ! Create the dataset with compound datatype. - ! - CALL h5dcreate_f(file_id, dsetname, dtype_id, dspace_id, & - dset_id, error) - ! - ! Create memory types. We have to create a compound datatype - ! for each member we want to write. - ! - CALL h5tcreate_f(H5T_COMPOUND_F, type_sizec, dt1_id, error) - offset = 0 - CALL h5tinsert_f(dt1_id, "char_field", offset, dt5_id, error) - ! - CALL h5tcreate_f(H5T_COMPOUND_F, type_sizei, dt2_id, error) - offset = 0 - CALL h5tinsert_f(dt2_id, "integer_field", offset, H5T_NATIVE_INTEGER, error) - ! - CALL h5tcreate_f(H5T_COMPOUND_F, type_sized, dt3_id, error) - offset = 0 - CALL h5tinsert_f(dt3_id, "double_field", offset, H5T_NATIVE_DOUBLE, error) - ! - CALL h5tcreate_f(H5T_COMPOUND_F, type_sizer, dt4_id, error) - offset = 0 - CALL h5tinsert_f(dt4_id, "real_field", offset, H5T_NATIVE_REAL, error) - ! - ! Write data by fields in the datatype. Fields order is not important. - ! - CALL h5dwrite_f(dset_id, dt4_id, real_member, error, xfer_prp = plist_id) - CALL h5dwrite_f(dset_id, dt1_id, char_member, error, xfer_prp = plist_id) - CALL h5dwrite_f(dset_id, dt3_id, double_member, error, xfer_prp = plist_id) - CALL h5dwrite_f(dset_id, dt2_id, int_member, error, xfer_prp = plist_id) - - ! - ! End access to the dataset and release resources used by it. - ! - CALL h5dclose_f(dset_id, error) - - ! - ! Terminate access to the data space. - ! - CALL h5sclose_f(dspace_id, error) - ! - ! Terminate access to the datatype - ! - CALL h5tclose_f(dtype_id, error) - CALL h5tclose_f(dt1_id, error) - CALL h5tclose_f(dt2_id, error) - CALL h5tclose_f(dt3_id, error) - CALL h5tclose_f(dt4_id, error) - CALL h5tclose_f(dt5_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Open the file. - ! - CALL h5fopen_f (filename, H5F_ACC_RDWR_F, file_id, error) - ! - ! Open the dataset. - ! - CALL h5dopen_f(file_id, dsetname, dset_id, error) - ! - ! Create memeory datatyoe to read character member of the compound datatype. - ! 
- CALL h5tcopy_f(H5T_NATIVE_CHARACTER, dt2_id, error) - typesize = 2 - CALL h5tset_size_f(dt2_id, typesize, error) - CALL h5tget_size_f(dt2_id, type_size, error) - CALL h5tcreate_f(H5T_COMPOUND_F, type_size, dt1_id, error) - offset = 0 - CALL h5tinsert_f(dt1_id, "char_field", offset, dt2_id, error) - ! - ! Read part of the datatset and display it. - ! - CALL h5dread_f(dset_id, dt1_id, char_member_out, error) - write(*,*) (char_member_out(i), i=1, dimsize) - - ! - ! Close all open objects. - ! - CALL h5dclose_f(dset_id, error) - CALL h5tclose_f(dt1_id, error) - CALL h5tclose_f(dt2_id, error) - CALL h5fclose_f(file_id, error) - ! - ! Close FORTRAN interface. - ! - CALL h5close_f(error) - - END PROGRAM COMPOUNDEXAMPLE - - diff --git a/doc/html/Tutor/examples/dsetexample.f90 b/doc/html/Tutor/examples/dsetexample.f90 deleted file mode 100644 index 9b69a3f..0000000 --- a/doc/html/Tutor/examples/dsetexample.f90 +++ /dev/null @@ -1,70 +0,0 @@ -! -! The following example shows how to create an empty dataset. -! It creates a file called 'dsetf.h5', defines the -! dataset dataspace, creates a dataset which is a 4x6 integer array, -! and then closes the dataspace, the dataset, and the file. -! - - PROGRAM DSETEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=8), PARAMETER :: filename = "dsetf.h5" ! File name - CHARACTER(LEN=4), PARAMETER :: dsetname = "dset" ! Dataset name - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: dset_id ! Dataset identifier - INTEGER(HID_T) :: dspace_id ! Dataspace identifier - - - INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/4,6/) ! Dataset dimensions - INTEGER :: rank = 2 ! Dataset rank - - INTEGER :: error ! Error flag - - ! - ! Initialize FORTRAN predefined datatypes. - ! - CALL h5open_f(error) - - ! - ! Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - ! - ! Create the dataspace. - ! - CALL h5screate_simple_f(rank, dims, dspace_id, error) - - ! - ! Create the dataset with default properties. - ! - CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, dspace_id, & - dset_id, error) - - ! - ! End access to the dataset and release resources used by it. - ! - CALL h5dclose_f(dset_id, error) - - ! - ! Terminate access to the data space. - ! - CALL h5sclose_f(dspace_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Close FORTRAN predefined datatypes. - ! - CALL h5close_f(error) - - END PROGRAM DSETEXAMPLE - - diff --git a/doc/html/Tutor/examples/fileexample.f90 b/doc/html/Tutor/examples/fileexample.f90 deleted file mode 100644 index e11dcaa..0000000 --- a/doc/html/Tutor/examples/fileexample.f90 +++ /dev/null @@ -1,34 +0,0 @@ -! -! The following example demonstrates how to create and close an HDF5 file. -! It creates a file called 'file.h5', and then closes the file. -! - - PROGRAM FILEEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=8), PARAMETER :: filename = "filef.h5" ! File name - INTEGER(HID_T) :: file_id ! File identifier - - INTEGER :: error ! Error flag - -! -! Initialize FORTRAN interface. -! - CALL h5open_f (error) - ! - ! Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - ! - ! Terminate access to the file. - ! - CALL h5fclose_f(file_id, error) -! -! Close FORTRAN interface. -! 
- CALL h5close_f(error) - END PROGRAM FILEEXAMPLE diff --git a/doc/html/Tutor/examples/groupexample.f90 b/doc/html/Tutor/examples/groupexample.f90 deleted file mode 100644 index d98d7cd..0000000 --- a/doc/html/Tutor/examples/groupexample.f90 +++ /dev/null @@ -1,49 +0,0 @@ -! -! The following example shows how to create and close a group. -! It creates a file called 'group.h5', creates a group -! called MyGroup in the root group, and then closes the group and file. -! - - - PROGRAM GROUPEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=9), PARAMETER :: filename = "groupf.h5" ! File name - CHARACTER(LEN=7), PARAMETER :: groupname = "MyGroup" ! Group name - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: group_id ! Group identifier - - INTEGER :: error ! Error flag -! -! Initialize FORTRAN interface. -! - CALL h5open_f(error) - ! - ! Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - ! - ! Create a group named "/MyGroup" in the file. - ! - CALL h5gcreate_f(file_id, groupname, group_id, error) - - ! - ! Close the group. - ! - CALL h5gclose_f(group_id, error) - - ! - ! Terminate access to the file. - ! - CALL h5fclose_f(file_id, error) -! -! Close FORTRAN interface. -! - CALL h5close_f(error) - - END PROGRAM GROUPEXAMPLE diff --git a/doc/html/Tutor/examples/grpdsetexample.f90 b/doc/html/Tutor/examples/grpdsetexample.f90 deleted file mode 100644 index ceb2fe9..0000000 --- a/doc/html/Tutor/examples/grpdsetexample.f90 +++ /dev/null @@ -1,136 +0,0 @@ -! -! This example shows how to create a dataset in a particular group. -! It opens the file created in the previous example and creates two datasets. -! Absolute and relative dataset names are used. -! - - - PROGRAM GRPDSETEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=10), PARAMETER :: filename = "groupsf.h5" ! File name - CHARACTER(LEN=15), PARAMETER :: groupname = "MyGroup/Group_A" ! Group name - CHARACTER(LEN=13), PARAMETER :: dsetname1 = "MyGroup/dset1" ! Dataset name - CHARACTER(LEN=5), PARAMETER :: dsetname2 = "dset2" ! dataset name - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: group_id ! Group identifier - INTEGER(HID_T) :: dataset_id ! Dataset identifier - INTEGER(HID_T) :: dataspace_id ! Data space identifier - - INTEGER :: i, j - INTEGER :: error ! Error flag - - INTEGER, DIMENSION(3,3) :: dset1_data ! Data arrays - INTEGER, DIMENSION(2,10) :: dset2_data ! - - INTEGER(HSIZE_T), DIMENSION(2) :: dims1 = (/3,3/) ! Datasets dimensions - INTEGER(HSIZE_T), DIMENSION(2) :: dims2 = (/2,10/)! - - INTEGER :: rank = 2 ! Datasets rank - - ! - !Initialize dset1_data array - ! - do i = 1, 3 - do j = 1, 3 - dset1_data(i,j) = j; - end do - end do - - - ! - !Initialize dset2_data array - ! - do i = 1, 2 - do j = 1, 10 - dset2_data(i,j) = j; - end do - end do - - ! - ! Initialize FORTRAN predefined datatypes. - ! - CALL h5open_f(error) - - ! - ! Open an existing file. - ! - CALL h5fopen_f (filename, H5F_ACC_RDWR_F, file_id, error) - - ! - ! Create the data space for the first dataset. - ! - CALL h5screate_simple_f(rank, dims1, dataspace_id, error) - - ! - ! Create a dataset in group "MyGroup" with default properties. - ! - CALL h5dcreate_f(file_id, dsetname1, H5T_NATIVE_INTEGER, dataspace_id, & - dataset_id, error) - - ! - ! Write the first dataset. - ! - CALL h5dwrite_f(dataset_id, H5T_NATIVE_INTEGER, dset1_data, error) - - ! - ! 
Close the dataspace for the first dataset. - ! - CALL h5sclose_f(dataspace_id, error) - - ! - ! Close the first dataset. - ! - CALL h5dclose_f(dataset_id, error) - - ! - ! Open an existing group in the specified file. - ! - CALL h5gopen_f(file_id, groupname, group_id, error) - - ! - !Create the data space for the second dataset. - ! - CALL h5screate_simple_f(rank, dims2, dataspace_id, error) - - ! - ! Create the second dataset in group "Group_A" with default properties. - ! - CALL h5dcreate_f(group_id, dsetname2, H5T_NATIVE_INTEGER, dataspace_id, & - dataset_id, error) - - ! - ! Write the second dataset. - ! - CALL h5dwrite_f(dataset_id, H5T_NATIVE_INTEGER, dset2_data, error) - - ! - ! Close the dataspace for the second dataset. - ! - CALL h5sclose_f(dataspace_id, error) - - ! - ! Close the second dataset. - ! - CALL h5dclose_f(dataset_id, error) - - ! - ! Close the group. - ! - CALL h5gclose_f(group_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Close FORTRAN predefined datatypes. - ! - CALL h5close_f(error) - - END PROGRAM GRPDSETEXAMPLE diff --git a/doc/html/Tutor/examples/grpit.f90 b/doc/html/Tutor/examples/grpit.f90 deleted file mode 100644 index 3aff2ad..0000000 --- a/doc/html/Tutor/examples/grpit.f90 +++ /dev/null @@ -1,194 +0,0 @@ -! -! In this example we iterate through the members of the groups. -! - - - PROGRAM GRPITEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=11), PARAMETER :: filename = "iteratef.h5" ! File name - CHARACTER(LEN=7), PARAMETER :: groupname1 = "MyGroup" ! Group name - CHARACTER(LEN=15), PARAMETER :: groupname2 = "Group_A" ! Group name - CHARACTER(LEN=13), PARAMETER :: dsetname1 = "dset1" ! Dataset name - CHARACTER(LEN=5), PARAMETER :: dsetname2 = "dset2" ! - - CHARACTER(LEN=20) :: name_buffer ! Buffer to hold object's name - INTEGER :: type ! Type of the object - INTEGER :: nmembers ! Number of group members - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: dataset1_id ! Dataset1 identifier - INTEGER(HID_T) :: dataset2_id ! Dataset2 identifier - INTEGER(HID_T) :: dataspace1_id ! Data space identifier - INTEGER(HID_T) :: dataspace2_id ! Data space identifier - INTEGER(HID_T) :: group1_id, group2_id ! Group identifiers - - INTEGER :: i, j - - INTEGER :: error ! Error flag - - INTEGER, DIMENSION(3,3) :: dset1_data ! Arrays to hold data - INTEGER, DIMENSION(2,10) :: dset2_data ! - - INTEGER(HSIZE_T), DIMENSION(2) :: dims1 = (/3,3/) ! Dataset dimensions - INTEGER(HSIZE_T), DIMENSION(2) :: dims2 = (/2,10/)! - INTEGER :: rank = 2 ! Datasets rank - - ! - ! Initialize dset1_data array. - ! - do i = 1, 3 - do j = 1, 3 - dset1_data(i,j) = j; - end do - end do - - - ! - ! Initialize dset2_data array. - ! - do i = 1, 2 - do j = 1, 10 - dset2_data(i,j) = j; - end do - end do - - ! - ! Initialize FORTRAN interface. - ! - CALL h5open_f(error) - - ! - ! Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - ! - ! Create group "MyGroup" in the root group using absolute name. - ! - CALL h5gcreate_f(file_id, groupname1, group1_id, error) - - ! - ! Create group "Group_A" in group "MyGroup" using relative name. - ! - CALL h5gcreate_f(group1_id, groupname2, group2_id, error) - - ! - ! Create the data space for the first dataset. - ! - CALL h5screate_simple_f(rank, dims1, dataspace1_id, error) - - ! - ! Create a dataset in group "MyGroup" with default properties. - ! 
- CALL h5dcreate_f(group1_id, dsetname1, H5T_NATIVE_INTEGER, dataspace1_id, & - dataset1_id, error) - - ! - ! Write the first dataset. - ! - CALL h5dwrite_f(dataset1_id, H5T_NATIVE_INTEGER, dset1_data, error) - - ! - ! Create the data space for the second dataset. - ! - CALL h5screate_simple_f(rank, dims2, dataspace2_id, error) - - ! - ! Create the second dataset in group "Group_A" with default properties - ! - CALL h5dcreate_f(group2_id, dsetname2, H5T_NATIVE_INTEGER, dataspace2_id, & - dataset2_id, error) - - ! - ! Write the second dataset - ! - CALL h5dwrite_f(dataset2_id, H5T_NATIVE_INTEGER, dset2_data, error) - - ! - ! Get number of members in the root group. - ! - CALL h5gn_members_f(file_id, "/", nmembers, error) - write(*,*) "Number of root group member is " , nmembers - - ! - ! Print each group member's name and type. - ! - do i = 0, nmembers - 1 - CALL h5gget_obj_info_idx_f(file_id, "/", i, name_buffer, type, & - error) - write(*,*) name_buffer, type - end do - - ! - ! Get number of members in MyGroup. - ! - CALL h5gn_members_f(file_id, "MyGroup", nmembers, error) - write(*,*) "Number of group MyGroup member is ", nmembers - - ! - ! Print each group member's name and type in "MyGroup" group. - ! - do i = 0, nmembers - 1 - CALL h5gget_obj_info_idx_f(file_id, groupname1, i, name_buffer, type, & - error) - write(*,*) name_buffer, type - end do - - - ! - ! Get number of members in MyGroup/Group_A. - ! - CALL h5gn_members_f(file_id, "MyGroup/Group_A", nmembers, error) - write(*,*) "Number of group MyGroup/Group_A member is ", nmembers - - ! - ! Print each group member's name and type in "MyGroup/Group_A" group. - ! - do i = 0, nmembers - 1 - CALL h5gget_obj_info_idx_f(file_id,"MyGroup/Group_A" , i, name_buffer, type, & - error) - write(*,*) name_buffer, type - end do - - ! - ! Close the dataspace for the first dataset. - ! - CALL h5sclose_f(dataspace1_id, error) - - ! - ! Close the first dataset. - ! - CALL h5dclose_f(dataset1_id, error) - - ! - ! Close the dataspace for the second dataset. - ! - CALL h5sclose_f(dataspace2_id, error) - - ! - ! Close the second dataset. - ! - CALL h5dclose_f(dataset2_id, error) - - ! - ! Close the groups. - ! - CALL h5gclose_f(group1_id, error) - - CALL h5gclose_f(group2_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Close FORTRAN interface. - ! - CALL h5close_f(error) - - END PROGRAM GRPITEXAMPLE diff --git a/doc/html/Tutor/examples/grpsexample.f90 b/doc/html/Tutor/examples/grpsexample.f90 deleted file mode 100644 index 4b53bf0..0000000 --- a/doc/html/Tutor/examples/grpsexample.f90 +++ /dev/null @@ -1,68 +0,0 @@ -! -! The following example code shows how to create groups -! using absolute and relative names. It creates three groups: -! the first two groups are created using the file identifier and -! the group absolute names, and the third group is created using -! a group identifier and the name relative to the specified group. -! - - - PROGRAM GRPSEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=10), PARAMETER :: filename = "groupsf.h5" ! File name - CHARACTER(LEN=8), PARAMETER :: groupname1 = "/MyGroup" ! Group name - CHARACTER(LEN=16), PARAMETER :: groupname2 = "/MyGroup/Group_A" - ! Group name - CHARACTER(LEN=7), PARAMETER :: groupname3 = "Group_B" ! Group name - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: group1_id, group2_id, group3_id ! Group identifiers - - INTEGER :: error ! Error flag - ! - ! Initialize FORTRAN interface. - ! 
- CALL h5open_f(error) - - ! - ! Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - ! - ! Create group "MyGroup" in the root group using absolute name. - ! - CALL h5gcreate_f(file_id, groupname1, group1_id, error) - - ! - ! Create group "Group_A" in group "MyGroup" using absolute name. - ! - CALL h5gcreate_f(file_id, groupname2, group2_id, error) - - ! - ! Create group "Group_B" in group "MyGroup" using relative name. - ! - CALL h5gcreate_f(group1_id, groupname3, group3_id, error) - - ! - ! Close the groups. - ! - CALL h5gclose_f(group1_id, error) - CALL h5gclose_f(group2_id, error) - CALL h5gclose_f(group3_id, error) - - ! - ! Terminate access to the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Close FORTRAN interface. - ! - CALL h5close_f(error) - - END PROGRAM GRPSEXAMPLE diff --git a/doc/html/Tutor/examples/h5_compound.c b/doc/html/Tutor/examples/h5_compound.c deleted file mode 100644 index 4bb4ad1..0000000 --- a/doc/html/Tutor/examples/h5_compound.c +++ /dev/null @@ -1,153 +0,0 @@ -/* - * This example shows how to create a compound data type, - * write an array which has the compound data type to the file, - * and read back fields' subsets. - */ - -#include "hdf5.h" - -#define FILE "SDScompound.h5" -#define DATASETNAME "ArrayOfStructures" -#define LENGTH 10 -#define RANK 1 - -int -main(void) -{ - - /* First structure and dataset*/ - typedef struct s1_t { - int a; - float b; - double c; - } s1_t; - s1_t s1[LENGTH]; - hid_t s1_tid; /* File datatype identifier */ - - /* Second structure (subset of s1_t) and dataset*/ - typedef struct s2_t { - double c; - int a; - } s2_t; - s2_t s2[LENGTH]; - hid_t s2_tid; /* Memory datatype handle */ - - /* Third "structure" ( will be used to read float field of s1) */ - hid_t s3_tid; /* Memory datatype handle */ - float s3[LENGTH]; - - int i; - hid_t file, dataset, space; /* Handles */ - herr_t status; - hsize_t dim[] = {LENGTH}; /* Dataspace dimensions */ - - - /* - * Initialize the data - */ - for (i = 0; i< LENGTH; i++) { - s1[i].a = i; - s1[i].b = i*i; - s1[i].c = 1./(i+1); - } - - /* - * Create the data space. - */ - space = H5Screate_simple(RANK, dim, NULL); - - /* - * Create the file. - */ - file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* - * Create the memory data type. - */ - s1_tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t)); - H5Tinsert(s1_tid, "a_name", HOFFSET(s1_t, a), H5T_NATIVE_INT); - H5Tinsert(s1_tid, "c_name", HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); - H5Tinsert(s1_tid, "b_name", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT); - - /* - * Create the dataset. - */ - dataset = H5Dcreate(file, DATASETNAME, s1_tid, space, H5P_DEFAULT); - - /* - * Wtite data to the dataset; - */ - status = H5Dwrite(dataset, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, s1); - - /* - * Release resources - */ - H5Tclose(s1_tid); - H5Sclose(space); - H5Dclose(dataset); - H5Fclose(file); - - /* - * Open the file and the dataset. - */ - file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - - dataset = H5Dopen(file, DATASETNAME); - - /* - * Create a data type for s2 - */ - s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t)); - - H5Tinsert(s2_tid, "c_name", HOFFSET(s2_t, c), H5T_NATIVE_DOUBLE); - H5Tinsert(s2_tid, "a_name", HOFFSET(s2_t, a), H5T_NATIVE_INT); - - /* - * Read two fields c and a from s1 dataset. Fields in the file - * are found by their names "c_name" and "a_name". 
- */ - status = H5Dread(dataset, s2_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, s2); - - /* - * Display the fields - */ - printf("\n"); - printf("Field c : \n"); - for( i = 0; i < LENGTH; i++) printf("%.4f ", s2[i].c); - printf("\n"); - - printf("\n"); - printf("Field a : \n"); - for( i = 0; i < LENGTH; i++) printf("%d ", s2[i].a); - printf("\n"); - - /* - * Create a data type for s3. - */ - s3_tid = H5Tcreate(H5T_COMPOUND, sizeof(float)); - - status = H5Tinsert(s3_tid, "b_name", 0, H5T_NATIVE_FLOAT); - - /* - * Read field b from s1 dataset. Field in the file is found by its name. - */ - status = H5Dread(dataset, s3_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, s3); - - /* - * Display the field - */ - printf("\n"); - printf("Field b : \n"); - for( i = 0; i < LENGTH; i++) printf("%.4f ", s3[i]); - printf("\n"); - - /* - * Release resources - */ - H5Tclose(s2_tid); - H5Tclose(s3_tid); - H5Dclose(dataset); - H5Fclose(file); - - return 0; -} diff --git a/doc/html/Tutor/examples/h5_copy.c b/doc/html/Tutor/examples/h5_copy.c deleted file mode 100644 index 357596b..0000000 --- a/doc/html/Tutor/examples/h5_copy.c +++ /dev/null @@ -1,148 +0,0 @@ -/***********************************************************************/ -/* */ -/* PROGRAM: h5_copy.c */ -/* PURPOSE: Shows how to use the H5SCOPY function. */ -/* DESCRIPTION: */ -/* This program creates two files, copy1.h5, and copy2.h5. */ -/* In copy1.h5, it creates a 3x4 dataset called 'Copy1', */ -/* and write 0's to this dataset. */ -/* In copy2.h5, it create a 3x4 dataset called 'Copy2', */ -/* and write 1's to this dataset. */ -/* It closes both files, reopens both files, selects two */ -/* points in copy1.h5 and writes values to them. Then it */ -/* does an H5Scopy from the first file to the second, and */ -/* writes the values to copy2.h5. It then closes the */ -/* files, reopens them, and prints the contents of the */ -/* two datasets. */ -/* */ -/***********************************************************************/ - -#include "hdf5.h" -#define FILE1 "copy1.h5" -#define FILE2 "copy2.h5" - -#define RANK 2 -#define DIM1 3 -#define DIM2 4 -#define NUMP 2 - -int main (void) -{ - hid_t file1, file2, dataset1, dataset2; - hid_t mid1, mid2, fid1, fid2; - hsize_t fdim[] = {DIM1, DIM2}; - hsize_t mdim[] = {DIM1, DIM2}; - hsize_t start[2], stride[2], count[2], block[2]; - int buf1[DIM1][DIM2]; - int buf2[DIM1][DIM2]; - int bufnew[DIM1][DIM2]; - int val[] = {53, 59}; - hsize_t marray[] = {2}; - hsize_t coord[NUMP][RANK]; - herr_t ret; - uint i, j; - -/***********************************************************************/ -/* */ -/* Create two files containing identical datasets. Write 0's to one */ -/* and 1's to the other. 
*/ -/* */ -/***********************************************************************/ - - for ( i = 0; i < DIM1; i++ ) - for ( j = 0; j < DIM2; j++ ) - buf1[i][j] = 0; - - for ( i = 0; i < DIM1; i++ ) - for ( j = 0; j < DIM2; j++ ) - buf2[i][j] = 1; - - file1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - file2 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - fid1 = H5Screate_simple (RANK, fdim, NULL); - fid2 = H5Screate_simple (RANK, fdim, NULL); - - dataset1 = H5Dcreate (file1, "Copy1", H5T_NATIVE_INT, fid1, H5P_DEFAULT); - dataset2 = H5Dcreate (file2, "Copy2", H5T_NATIVE_INT, fid2, H5P_DEFAULT); - - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1); - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2); - - ret = H5Dclose (dataset1); - ret = H5Dclose (dataset2); - - ret = H5Sclose (fid1); - ret = H5Sclose (fid2); - - ret = H5Fclose (file1); - ret = H5Fclose (file2); - -/***********************************************************************/ -/* */ -/* Open the two files. Select two points in one file, write values to */ -/* those point locations, then do H5Scopy and write the values to the */ -/* other file. Close files. */ -/* */ -/***********************************************************************/ - - file1 = H5Fopen (FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - file2 = H5Fopen (FILE2, H5F_ACC_RDWR, H5P_DEFAULT); - dataset1 = H5Dopen (file1, "Copy1"); - dataset2 = H5Dopen (file2, "Copy2"); - fid1 = H5Dget_space (dataset1); - mid1 = H5Screate_simple(1, marray, NULL); - coord[0][0] = 0; coord[0][1] = 3; - coord[1][0] = 0; coord[1][1] = 1; - - ret = H5Sselect_elements (fid1, H5S_SELECT_SET, NUMP, (const hsize_t **)coord); - - ret = H5Dwrite (dataset1, H5T_NATIVE_INT, mid1, fid1, H5P_DEFAULT, val); - - fid2 = H5Scopy (fid1); - - ret = H5Dwrite (dataset2, H5T_NATIVE_INT, mid1, fid2, H5P_DEFAULT, val); - - ret = H5Dclose (dataset1); - ret = H5Dclose (dataset2); - ret = H5Sclose (fid1); - ret = H5Sclose (fid2); - ret = H5Fclose (file1); - ret = H5Fclose (file2); - ret = H5Sclose (mid1); - -/***********************************************************************/ -/* */ -/* Open both files and print the contents of the datasets. */ -/* */ -/***********************************************************************/ - - file1 = H5Fopen (FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - file2 = H5Fopen (FILE2, H5F_ACC_RDWR, H5P_DEFAULT); - dataset1 = H5Dopen (file1, "Copy1"); - dataset2 = H5Dopen (file2, "Copy2"); - - ret = H5Dread (dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, bufnew); - - printf ("\nDataset 'Copy1' in file 'copy1.h5' contains: \n"); - for (i=0;i -#define FILE "dset.h5" - -main() { - - hid_t file_id, dataset_id, attribute_id, dataspace_id; /* identifiers */ - hsize_t dims; - int attr_data[2]; - herr_t status; - - /* Initialize the attribute data. */ - attr_data[0] = 100; - attr_data[1] = 200; - - /* Open an existing file. */ - file_id = H5Fopen(FILE, H5F_ACC_RDWR, H5P_DEFAULT); - - /* Open an existing dataset. */ - dataset_id = H5Dopen(file_id, "/dset"); - - /* Create the data space for the attribute. */ - dims = 2; - dataspace_id = H5Screate_simple(1, &dims, NULL); - - /* Create a dataset attribute. */ - attribute_id = H5Acreate(dataset_id, "attr", H5T_STD_I32BE, dataspace_id, H5P_DEFAULT); - - /* Write the attribute data. */ - status = H5Awrite(attribute_id, H5T_NATIVE_INT, attr_data); - - /* Close the attribute. */ - status = H5Aclose(attribute_id); - - /* Close the dataspace. 
*/ - status = H5Sclose(dataspace_id); - - /* Close to the dataset. */ - status = H5Dclose(dataset_id); - - /* Close the file. */ - status = H5Fclose(file_id); -} diff --git a/doc/html/Tutor/examples/h5_crtdat.c b/doc/html/Tutor/examples/h5_crtdat.c deleted file mode 100644 index d704cf1..0000000 --- a/doc/html/Tutor/examples/h5_crtdat.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Creating and closing a dataset. - */ - -#include -#define FILE "dset.h5" - -main() { - - hid_t file_id, dataset_id, dataspace_id; /* identifiers */ - hsize_t dims[2]; - herr_t status; - - /* Create a new file using default properties. */ - file_id = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create the data space for the dataset. */ - dims[0] = 4; - dims[1] = 6; - dataspace_id = H5Screate_simple(2, dims, NULL); - - /* Create the dataset. */ - dataset_id = H5Dcreate(file_id, "/dset", H5T_STD_I32BE, dataspace_id, H5P_DEFAULT); - - /* End access to the dataset and release resources used by it. */ - status = H5Dclose(dataset_id); - - /* Terminate access to the data space. */ - status = H5Sclose(dataspace_id); - - /* Close the file. */ - status = H5Fclose(file_id); -} - diff --git a/doc/html/Tutor/examples/h5_crtfile.c b/doc/html/Tutor/examples/h5_crtfile.c deleted file mode 100644 index 680cde8..0000000 --- a/doc/html/Tutor/examples/h5_crtfile.c +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Creating and closing a file. - */ - -#include -#define FILE "file.h5" - -main() { - - hid_t file_id; /* file identifier */ - herr_t status; - - /* Create a new file using default properties. */ - file_id = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Terminate access to the file. */ - status = H5Fclose(file_id); -} - diff --git a/doc/html/Tutor/examples/h5_crtgrp.c b/doc/html/Tutor/examples/h5_crtgrp.c deleted file mode 100644 index a6aad93..0000000 --- a/doc/html/Tutor/examples/h5_crtgrp.c +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Creating and closing a group. - */ - -#include -#define FILE "group.h5" - -main() { - - hid_t file_id, group_id; /* identifiers */ - herr_t status; - - /* Create a new file using default properties. */ - file_id = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create a group named "/MyGroup" in the file. */ - group_id = H5Gcreate(file_id, "/MyGroup", 0); - - /* Close the group. */ - status = H5Gclose(group_id); - - /* Terminate access to the file. */ - status = H5Fclose(file_id); -} diff --git a/doc/html/Tutor/examples/h5_crtgrpar.c b/doc/html/Tutor/examples/h5_crtgrpar.c deleted file mode 100644 index 6dcc003..0000000 --- a/doc/html/Tutor/examples/h5_crtgrpar.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Creating groups using absolute and relative names. - */ - -#include -#define FILE "groups.h5" - -main() { - - hid_t file_id, group1_id, group2_id, group3_id; /* identifiers */ - herr_t status; - - /* Create a new file using default properties. */ - file_id = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create group "MyGroup" in the root group using absolute name. */ - group1_id = H5Gcreate(file_id, "/MyGroup", 0); - - /* Create group "Group_A" in group "MyGroup" using absolute name. */ - group2_id = H5Gcreate(file_id, "/MyGroup/Group_A", 0); - - /* Create group "Group_B" in group "MyGroup" using relative name. */ - group3_id = H5Gcreate(group1_id, "Group_B", 0); - - /* Close groups. */ - status = H5Gclose(group1_id); - status = H5Gclose(group2_id); - status = H5Gclose(group3_id); - - /* Close the file. 
*/ - status = H5Fclose(file_id); -} diff --git a/doc/html/Tutor/examples/h5_crtgrpd.c b/doc/html/Tutor/examples/h5_crtgrpd.c deleted file mode 100644 index e497764..0000000 --- a/doc/html/Tutor/examples/h5_crtgrpd.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Create two datasets within groups. - */ - -#include -#define FILE "groups.h5" - -main() { - - hid_t file_id, group_id, dataset_id, dataspace_id; /* identifiers */ - hsize_t dims[2]; - herr_t status; - int i, j, dset1_data[3][3], dset2_data[2][10]; - - /* Initialize the first dataset. */ - for (i = 0; i < 3; i++) - for (j = 0; j < 3; j++) - dset1_data[i][j] = j + 1; - - /* Initialize the second dataset. */ - for (i = 0; i < 2; i++) - for (j = 0; j < 10; j++) - dset2_data[i][j] = j + 1; - - /* Open an existing file. */ - file_id = H5Fopen(FILE, H5F_ACC_RDWR, H5P_DEFAULT); - - /* Create the data space for the first dataset. */ - dims[0] = 3; - dims[1] = 3; - dataspace_id = H5Screate_simple(2, dims, NULL); - - /* Create a dataset in group "MyGroup". */ - dataset_id = H5Dcreate(file_id, "/MyGroup/dset1", H5T_STD_I32BE, dataspace_id, - H5P_DEFAULT); - - /* Write the first dataset. */ - status = H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, - dset1_data); - - /* Close the data space for the first dataset. */ - status = H5Sclose(dataspace_id); - - /* Close the first dataset. */ - status = H5Dclose(dataset_id); - - /* Open an existing group of the specified file. */ - group_id = H5Gopen(file_id, "/MyGroup/Group_A"); - - /* Create the data space for the second dataset. */ - dims[0] = 2; - dims[1] = 10; - dataspace_id = H5Screate_simple(2, dims, NULL); - - /* Create the second dataset in group "Group_A". */ - dataset_id = H5Dcreate(group_id, "dset2", H5T_STD_I32BE, dataspace_id, H5P_DEFAULT); - - /* Write the second dataset. */ - status = H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, - dset2_data); - - /* Close the data space for the second dataset. */ - status = H5Sclose(dataspace_id); - - /* Close the second dataset */ - status = H5Dclose(dataset_id); - - /* Close the group. */ - status = H5Gclose(group_id); - - /* Close the file. */ - status = H5Fclose(file_id); -} - diff --git a/doc/html/Tutor/examples/h5_extend.c b/doc/html/Tutor/examples/h5_extend.c deleted file mode 100644 index 1f81827..0000000 --- a/doc/html/Tutor/examples/h5_extend.c +++ /dev/null @@ -1,141 +0,0 @@ -/************************************************************** - * - * This example shows how to work with extendible datasets. - * In the current version of the library a dataset MUST be - * chunked in order to be extendible. - * - * This example is derived from the h5_extend_write.c and - * h5_read_chunk.c examples that are in the "Introduction - * to HDF5". 
- * - *************************************************************/ - -#include "hdf5.h" - -#define FILE "ext.h5" -#define DATASETNAME "ExtendibleArray" -#define RANK 2 - -int -main (void) -{ - hid_t file; /* handles */ - hid_t dataspace, dataset; - hid_t filespace; - hid_t cparms; - hid_t memspace; - - hsize_t dims[2] = { 3, 3}; /* dataset dimensions - at creation time */ - hsize_t dims1[2] = { 3, 3}; /* data1 dimensions */ - hsize_t dims2[2] = { 7, 1}; /* data2 dimensions */ - - hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; - hsize_t size[2]; - hsize_t offset[2]; - hsize_t i,j; - herr_t status, status_n; - int data1[3][3] = { {1, 1, 1}, /* data to write */ - {1, 1, 1}, - {1, 1, 1} }; - - int data2[7] = { 2, 2, 2, 2, 2, 2, 2}; - - /* Variables used in reading data back */ - hsize_t chunk_dims[2] ={2, 5}; - hsize_t chunk_dimsr[2]; - hsize_t dimsr[2]; - int data_out[10][3]; - int rank, rank_chunk; - - /* Create the data space with unlimited dimensions. */ - dataspace = H5Screate_simple (RANK, dims, maxdims); - - /* Create a new file. If file exists its contents will be overwritten. */ - file = H5Fcreate (FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Modify dataset creation properties, i.e. enable chunking */ - cparms = H5Pcreate (H5P_DATASET_CREATE); - status = H5Pset_chunk ( cparms, RANK, chunk_dims); - - /* Create a new dataset within the file using cparms - creation properties. */ - dataset = H5Dcreate (file, DATASETNAME, H5T_NATIVE_INT, dataspace, - cparms); - - /* Extend the dataset. This call assures that dataset is 3 x 3.*/ - size[0] = 3; - size[1] = 3; - status = H5Dextend (dataset, size); - - /* Select a hyperslab */ - filespace = H5Dget_space (dataset); - offset[0] = 0; - offset[1] = 0; - status = H5Sselect_hyperslab (filespace, H5S_SELECT_SET, offset, NULL, - dims1, NULL); - - /* Write the data to the hyperslab */ - status = H5Dwrite (dataset, H5T_NATIVE_INT, dataspace, filespace, - H5P_DEFAULT, data1); - - /* Extend the dataset. 
Dataset becomes 10 x 3 */ - dims[0] = dims1[0] + dims2[0]; - size[0] = dims[0]; - size[1] = dims[1]; - status = H5Dextend (dataset, size); - - /* Select a hyperslab */ - filespace = H5Dget_space (dataset); - offset[0] = 3; - offset[1] = 0; - status = H5Sselect_hyperslab (filespace, H5S_SELECT_SET, offset, NULL, - dims2, NULL); - - /* Define memory space */ - dataspace = H5Screate_simple (RANK, dims2, NULL); - - /* Write the data to the hyperslab */ - status = H5Dwrite (dataset, H5T_NATIVE_INT, dataspace, filespace, - H5P_DEFAULT, data2); - - /* Close resources */ - status = H5Dclose (dataset); - status = H5Sclose (dataspace); - status = H5Sclose (filespace); - status = H5Fclose (file); - -/**************************************************************** - Read the data back - ***************************************************************/ - - file = H5Fopen (FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - dataset = H5Dopen (file, DATASETNAME); - filespace = H5Dget_space (dataset); - rank = H5Sget_simple_extent_ndims (filespace); - status_n = H5Sget_simple_extent_dims (filespace, dimsr, NULL); - - cparms = H5Dget_create_plist (dataset); - if (H5D_CHUNKED == H5Pget_layout (cparms)) - { - rank_chunk = H5Pget_chunk (cparms, 2, chunk_dimsr); - } - - memspace = H5Screate_simple (rank,dimsr,NULL); - status = H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, - H5P_DEFAULT, data_out); - printf("\n"); - printf("Dataset: \n"); - for (j = 0; j < dimsr[0]; j++) - { - for (i = 0; i < dimsr[1]; i++) - printf("%d ", data_out[j][i]); - printf("\n"); - } - - status = H5Pclose (cparms); - status = H5Dclose (dataset); - status = H5Sclose (filespace); - status = H5Sclose (memspace); - status = H5Fclose (file); -} diff --git a/doc/html/Tutor/examples/h5_hyperslab.c b/doc/html/Tutor/examples/h5_hyperslab.c deleted file mode 100644 index 120e30d..0000000 --- a/doc/html/Tutor/examples/h5_hyperslab.c +++ /dev/null @@ -1,192 +0,0 @@ -/************************************************************ - - This example shows how to write and read a hyperslab. It - is derived from the h5_read.c and h5_write.c examples in - the "Introduction to HDF5". - - ************************************************************/ - -#include "hdf5.h" - -#define FILE "sds.h5" -#define DATASETNAME "IntArray" -#define NX_SUB 3 /* hyperslab dimensions */ -#define NY_SUB 4 -#define NX 7 /* output buffer dimensions */ -#define NY 7 -#define NZ 3 -#define RANK 2 -#define RANK_OUT 3 - -#define X 5 /* dataset dimensions */ -#define Y 6 - -int -main (void) -{ - hsize_t dimsf[2]; /* dataset dimensions */ - int data[X][Y]; /* data to write */ - - /* - * Data and output buffer initialization. - */ - hid_t file, dataset; /* handles */ - hid_t dataspace; - hid_t memspace; - hsize_t dimsm[3]; /* memory space dimensions */ - hsize_t dims_out[2]; /* dataset dimensions */ - herr_t status; - - int data_out[NX][NY][NZ ]; /* output buffer */ - - hsize_t count[2]; /* size of the hyperslab in the file */ - hsize_t offset[2]; /* hyperslab offset in the file */ - hsize_t count_out[3]; /* size of the hyperslab in memory */ - hsize_t offset_out[3]; /* hyperslab offset in memory */ - int i, j, k, status_n, rank; - - - -/********************************************************* - This writes data to the HDF5 file. - *********************************************************/ - - /* - * Data and output buffer initialization. 
- */ - for (j = 0; j < X; j++) { - for (i = 0; i < Y; i++) - data[j][i] = i + j; - } - /* - * 0 1 2 3 4 5 - * 1 2 3 4 5 6 - * 2 3 4 5 6 7 - * 3 4 5 6 7 8 - * 4 5 6 7 8 9 - */ - - /* - * Create a new file using H5F_ACC_TRUNC access, - * the default file creation properties, and the default file - * access properties. - */ - file = H5Fcreate (FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* - * Describe the size of the array and create the data space for fixed - * size dataset. - */ - dimsf[0] = X; - dimsf[1] = Y; - dataspace = H5Screate_simple (RANK, dimsf, NULL); - - /* - * Create a new dataset within the file using defined dataspace and - * default dataset creation properties. - */ - dataset = H5Dcreate (file, DATASETNAME, H5T_STD_I32BE, dataspace, - H5P_DEFAULT); - - /* - * Write the data to the dataset using default transfer properties. - */ - status = H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, data); - - /* - * Close/release resources. - */ - H5Sclose (dataspace); - H5Dclose (dataset); - H5Fclose (file); - - -/************************************************************* - - This reads the hyperslab from the sds.h5 file just - created, into a 2-dimensional plane of the 3-dimensional - array. - - ************************************************************/ - - for (j = 0; j < NX; j++) { - for (i = 0; i < NY; i++) { - for (k = 0; k < NZ ; k++) - data_out[j][i][k] = 0; - } - } - - /* - * Open the file and the dataset. - */ - file = H5Fopen (FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - dataset = H5Dopen (file, DATASETNAME); - - dataspace = H5Dget_space (dataset); /* dataspace handle */ - rank = H5Sget_simple_extent_ndims (dataspace); - status_n = H5Sget_simple_extent_dims (dataspace, dims_out, NULL); - printf("\nRank: %d\nDimensions: %lu x %lu \n", rank, - (unsigned long)(dims_out[0]), (unsigned long)(dims_out[1])); - - /* - * Define hyperslab in the dataset. - */ - offset[0] = 1; - offset[1] = 2; - count[0] = NX_SUB; - count[1] = NY_SUB; - status = H5Sselect_hyperslab (dataspace, H5S_SELECT_SET, offset, NULL, - count, NULL); - - /* - * Define the memory dataspace. - */ - dimsm[0] = NX; - dimsm[1] = NY; - dimsm[2] = NZ; - memspace = H5Screate_simple (RANK_OUT, dimsm, NULL); - - /* - * Define memory hyperslab. - */ - offset_out[0] = 3; - offset_out[1] = 0; - offset_out[2] = 0; - count_out[0] = NX_SUB; - count_out[1] = NY_SUB; - count_out[2] = 1; - status = H5Sselect_hyperslab (memspace, H5S_SELECT_SET, offset_out, NULL, - count_out, NULL); - - /* - * Read data from hyperslab in the file into the hyperslab in - * memory and display. - */ - status = H5Dread (dataset, H5T_NATIVE_INT, memspace, dataspace, - H5P_DEFAULT, data_out); - printf ("Data:\n "); - for (j = 0; j < NX; j++) { - for (i = 0; i < NY; i++) printf("%d ", data_out[j][i][0]); - printf("\n "); - } - printf("\n"); - /* - * 0 0 0 0 0 0 0 - * 0 0 0 0 0 0 0 - * 0 0 0 0 0 0 0 - * 3 4 5 6 0 0 0 - * 4 5 6 7 0 0 0 - * 5 6 7 8 0 0 0 - * 0 0 0 0 0 0 0 - */ - - /* - * Close and release resources. 
- */ - H5Dclose (dataset); - H5Sclose (dataspace); - H5Sclose (memspace); - H5Fclose (file); - -} diff --git a/doc/html/Tutor/examples/h5_iterate.c b/doc/html/Tutor/examples/h5_iterate.c deleted file mode 100644 index db58183..0000000 --- a/doc/html/Tutor/examples/h5_iterate.c +++ /dev/null @@ -1,111 +0,0 @@ -#include - -#define FILE "iterate.h5" -#define FALSE 0 - -/* 1-D dataset with fixed dimensions */ -#define SPACE1_NAME "Space1" -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -herr_t file_info(hid_t loc_id, const char *name, void *opdata); - /* Operator function */ -int -main(void) { - hid_t file; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t group; /* Group ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims[] = {SPACE1_DIM1}; - herr_t ret; /* Generic return value */ - -/* Compound datatype */ -typedef struct s1_t { - unsigned int a; - unsigned int b; - float c; -} s1_t; - - /* Create file */ - file = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(SPACE1_RANK, dims, NULL); - - /* Create a group */ - group=H5Gcreate(file,"Group1",-1); - - /* Close a group */ - ret = H5Gclose(group); - - /* Create a dataset */ - dataset=H5Dcreate(file,"Dataset1",H5T_STD_U32LE,sid,H5P_DEFAULT); - - /* Close Dataset */ - ret = H5Dclose(dataset); - - /* Create a datatype */ - tid = H5Tcreate (H5T_COMPOUND, sizeof(s1_t)); - - /* Insert fields */ - ret=H5Tinsert (tid, "a", HOFFSET(s1_t,a), H5T_NATIVE_INT); - - ret=H5Tinsert (tid, "b", HOFFSET(s1_t,b), H5T_NATIVE_INT); - - ret=H5Tinsert (tid, "c", HOFFSET(s1_t,c), H5T_NATIVE_FLOAT); - - /* Save datatype for later */ - ret=H5Tcommit (file, "Datatype1", tid); - - /* Close datatype */ - ret = H5Tclose(tid); - - /* Iterate through the file to see members of the root group */ - - printf(" Objects in the root group are:\n"); - printf("\n"); - - H5Giterate(file, "/", NULL, file_info, NULL); - - /* Close file */ - ret = H5Fclose(file); - - return 0; -} - -/* - * Operator function. - */ -herr_t file_info(hid_t loc_id, const char *name, void *opdata) -{ - H5G_stat_t statbuf; - - /* - * Get type of the object and display its name and type. - * The name of the object is passed to this function by - * the Library. Some magic :-) - */ - H5Gget_objinfo(loc_id, name, FALSE, &statbuf); - switch (statbuf.type) { - case H5G_GROUP: - printf(" Object with name %s is a group \n", name); - break; - case H5G_DATASET: - printf(" Object with name %s is a dataset \n", name); - break; - case H5G_TYPE: - printf(" Object with name %s is a named datatype \n", name); - break; - default: - printf(" Unable to identify an object "); - } - return 0; - } - - - - - - - diff --git a/doc/html/Tutor/examples/h5_mount.c b/doc/html/Tutor/examples/h5_mount.c deleted file mode 100644 index 452ad6e..0000000 --- a/doc/html/Tutor/examples/h5_mount.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * This program shows the concept of "mounting files". - * Program creates one file with group G in it, and another - * file with dataset D. Then second file is mounted in the first one - * under the "mounting point" G. Dataset D is accessed in the first file - * under name /G/D and data is printed out. 
- */ - -#include - -#define FILE1 "mount1.h5" -#define FILE2 "mount2.h5" - -#define RANK 2 -#define NX 4 -#define NY 5 - -int main(void) -{ - - hid_t fid1, fid2, gid; /* Files and group identifiers */ - hid_t did, tid, sid; /* Dataset and datatype identifiers */ - - herr_t status; - hsize_t dims[] = {NX,NY}; /* Dataset dimensions */ - - int i, j; - int bm[NX][NY], bm_out[NX][NY]; /* Data buffers */ - - /* - * Initialization of buffer matrix "bm" - */ - for(i =0; i -#define FILE "dset.h5" - -main() { - - hid_t file_id, dataset_id; /* identifiers */ - herr_t status; - int i, j, dset_data[4][6]; - - /* Initialize the dataset. */ - for (i = 0; i < 4; i++) - for (j = 0; j < 6; j++) - dset_data[i][j] = i * 6 + j + 1; - - /* Open an existing file. */ - file_id = H5Fopen(FILE, H5F_ACC_RDWR, H5P_DEFAULT); - - /* Open an existing dataset. */ - dataset_id = H5Dopen(file_id, "/dset"); - - /* Write the dataset. */ - status = H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, - dset_data); - - status = H5Dread(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, - dset_data); - - /* Close the dataset. */ - status = H5Dclose(dataset_id); - - /* Close the file. */ - status = H5Fclose(file_id); -} diff --git a/doc/html/Tutor/examples/h5_read.c b/doc/html/Tutor/examples/h5_read.c deleted file mode 100644 index 8f2f179..0000000 --- a/doc/html/Tutor/examples/h5_read.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * This example reads hyperslab from the SDS.h5 file - * created by h5_write.c program into two-dimensional - * plane of the three-dimensional array. - * Information about dataset in the SDS.h5 file is obtained. - */ - -#include "hdf5.h" - -#define FILE "SDS.h5" -#define DATASETNAME "IntArray" -#define NX_SUB 3 /* hyperslab dimensions */ -#define NY_SUB 4 -#define NX 7 /* output buffer dimensions */ -#define NY 7 -#define NZ 3 -#define RANK 2 -#define RANK_OUT 3 - -int -main (void) -{ - hid_t file, dataset; /* handles */ - hid_t datatype, dataspace; - hid_t memspace; - H5T_class_t class; /* data type class */ - H5T_order_t order; /* data order */ - size_t size; /* - * size of the data element - * stored in file - */ - hsize_t dimsm[3]; /* memory space dimensions */ - hsize_t dims_out[2]; /* dataset dimensions */ - herr_t status; - - int data_out[NX][NY][NZ ]; /* output buffer */ - - hsize_t count[2]; /* size of the hyperslab in the file */ - hsize_t offset[2]; /* hyperslab offset in the file */ - hsize_t count_out[3]; /* size of the hyperslab in memory */ - hsize_t offset_out[3]; /* hyperslab offset in memory */ - int i, j, k, status_n, rank; - - for (j = 0; j < NX; j++) { - for (i = 0; i < NY; i++) { - for (k = 0; k < NZ ; k++) - data_out[j][i][k] = 0; - } - } - - /* - * Open the file and the dataset. - */ - file = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - dataset = H5Dopen(file, DATASETNAME); - - /* - * Get datatype and dataspace handles and then query - * dataset class, order, size, rank and dimensions. 
- */ - datatype = H5Dget_type(dataset); /* datatype handle */ - class = H5Tget_class(datatype); - if (class == H5T_INTEGER) printf("Data set has INTEGER type \n"); - order = H5Tget_order(datatype); - if (order == H5T_ORDER_LE) printf("Little endian order \n"); - - size = H5Tget_size(datatype); - printf(" Data size is %d \n", size); - - dataspace = H5Dget_space(dataset); /* dataspace handle */ - rank = H5Sget_simple_extent_ndims(dataspace); - status_n = H5Sget_simple_extent_dims(dataspace, dims_out, NULL); - printf("rank %d, dimensions %lu x %lu \n", rank, - (unsigned long)(dims_out[0]), (unsigned long)(dims_out[1])); - - /* - * Define hyperslab in the dataset. - */ - offset[0] = 1; - offset[1] = 2; - count[0] = NX_SUB; - count[1] = NY_SUB; - status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, - count, NULL); - - /* - * Define the memory dataspace. - */ - dimsm[0] = NX; - dimsm[1] = NY; - dimsm[2] = NZ ; - memspace = H5Screate_simple(RANK_OUT,dimsm,NULL); - - /* - * Define memory hyperslab. - */ - offset_out[0] = 3; - offset_out[1] = 0; - offset_out[2] = 0; - count_out[0] = NX_SUB; - count_out[1] = NY_SUB; - count_out[2] = 1; - status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, - count_out, NULL); - - /* - * Read data from hyperslab in the file into the hyperslab in - * memory and display. - */ - status = H5Dread(dataset, H5T_NATIVE_INT, memspace, dataspace, - H5P_DEFAULT, data_out); - for (j = 0; j < NX; j++) { - for (i = 0; i < NY; i++) printf("%d ", data_out[j][i][0]); - printf("\n"); - } - /* - * 0 0 0 0 0 0 0 - * 0 0 0 0 0 0 0 - * 0 0 0 0 0 0 0 - * 3 4 5 6 0 0 0 - * 4 5 6 7 0 0 0 - * 5 6 7 8 0 0 0 - * 0 0 0 0 0 0 0 - */ - - /* - * Close/release resources. - */ - H5Tclose(datatype); - H5Dclose(dataset); - H5Sclose(dataspace); - H5Sclose(memspace); - H5Fclose(file); - - return 0; -} diff --git a/doc/html/Tutor/examples/h5_ref2objr.c b/doc/html/Tutor/examples/h5_ref2objr.c deleted file mode 100644 index a226778..0000000 --- a/doc/html/Tutor/examples/h5_ref2objr.c +++ /dev/null @@ -1,93 +0,0 @@ -#include -#include - -#define FILE1 "trefer1.h5" - -/* dataset with fixed dimensions */ -#define SPACE1_NAME "Space1" -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -int -main(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t group; /* Group ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hobj_ref_t *rbuf; /* buffer to read from disk */ - int *tu32; /* temp. 
buffer read from disk */ - int i; /* counting variables */ - char read_comment[10]; - herr_t ret; /* Generic return value */ - - /* Allocate read buffers */ - rbuf = malloc(sizeof(hobj_ref_t)*SPACE1_DIM1); - tu32 = malloc(sizeof(int)*SPACE1_DIM1); - - /* Open the file */ - fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - - /* Open the dataset */ - dataset=H5Dopen(fid1,"/Dataset3"); - - /* Read selection from disk */ - ret=H5Dread(dataset,H5T_STD_REF_OBJ,H5S_ALL,H5S_ALL,H5P_DEFAULT,rbuf); - - /* Open dataset object */ - dset2 = H5Rdereference(dataset,H5R_OBJECT,&rbuf[0]); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset2); - - ret=H5Sget_simple_extent_npoints(sid1); - - /* Read from disk */ - ret=H5Dread(dset2,H5T_NATIVE_INT,H5S_ALL,H5S_ALL,H5P_DEFAULT,tu32); - printf("Dataset data : \n"); - for (i=0; i < SPACE1_DIM1 ; i++) printf (" %d ", tu32[i]); - printf("\n"); - printf("\n"); - - /* Close dereferenced Dataset */ - ret = H5Dclose(dset2); - - /* Open group object */ - group = H5Rdereference(dataset,H5R_OBJECT,&rbuf[2]); - - /* Get group's comment */ - ret=H5Gget_comment(group,".",10,read_comment); - printf("Group comment is %s \n", read_comment); - printf(" \n"); - /* Close group */ - ret = H5Gclose(group); - - /* Open datatype object */ - tid1 = H5Rdereference(dataset,H5R_OBJECT,&rbuf[3]); - - /* Verify correct datatype */ - { - H5T_class_t tclass; - - tclass= H5Tget_class(tid1); - if ((tclass == H5T_COMPOUND)) - printf ("Number of compound datatype members is %d \n", H5Tget_nmembers(tid1)); - printf(" \n"); - } - - /* Close datatype */ - ret = H5Tclose(tid1); - - /* Close Dataset */ - ret = H5Dclose(dataset); - - /* Close file */ - ret = H5Fclose(fid1); - - /* Free memory buffers */ - free(rbuf); - free(tu32); - return 0; -} diff --git a/doc/html/Tutor/examples/h5_ref2objw.c b/doc/html/Tutor/examples/h5_ref2objw.c deleted file mode 100644 index d499f8e..0000000 --- a/doc/html/Tutor/examples/h5_ref2objw.c +++ /dev/null @@ -1,120 +0,0 @@ - -#include - -#define FILE1 "trefer1.h5" - -/* 1-D dataset with fixed dimensions */ -#define SPACE1_NAME "Space1" -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -/* 2-D dataset with fixed dimensions */ -#define SPACE2_NAME "Space2" -#define SPACE2_RANK 2 -#define SPACE2_DIM1 10 -#define SPACE2_DIM2 10 - -int -main(void) { - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t group; /* Group ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hobj_ref_t *wbuf; /* buffer to write to disk */ - int *tu32; /* Temporary pointer to int data */ - int i; /* counting variables */ - const char *write_comment="Foo!"; /* Comments for group */ - herr_t ret; /* Generic return value */ - -/* Compound datatype */ -typedef struct s1_t { - unsigned int a; - unsigned int b; - float c; -} s1_t; - - /* Allocate write buffers */ - wbuf=(hobj_ref_t *)malloc(sizeof(hobj_ref_t)*SPACE1_DIM1); - tu32=malloc(sizeof(int)*SPACE1_DIM1); - - /* Create file */ - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - - /* Create a group */ - group=H5Gcreate(fid1,"Group1",-1); - - /* Set group's comment */ - ret=H5Gset_comment(group,".",write_comment); - - /* Create a dataset (inside Group1) */ - dataset=H5Dcreate(group,"Dataset1",H5T_STD_U32LE,sid1,H5P_DEFAULT); - - for(i=0; i -#include - -#define FILE2 "trefer2.h5" -#define NPOINTS 10 - -/* 1-D dataset with fixed dimensions 
*/ -#define SPACE1_NAME "Space1" -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -/* 2-D dataset with fixed dimensions */ -#define SPACE2_NAME "Space2" -#define SPACE2_RANK 2 -#define SPACE2_DIM1 10 -#define SPACE2_DIM2 10 - -int -main(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dset1, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t sid1, /* Dataspace ID #1 */ - sid2; /* Dataspace ID #2 */ - hsize_t * coords; /* Coordinate buffer */ - hsize_t low[SPACE2_RANK]; /* Selection bounds */ - hsize_t high[SPACE2_RANK]; /* Selection bounds */ - hdset_reg_ref_t *rbuf; /* buffer to to read disk */ - int *drbuf; /* Buffer for reading numeric data from disk */ - int i, j; /* counting variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - - /* Allocate write & read buffers */ - rbuf=malloc(sizeof(hdset_reg_ref_t)*SPACE1_DIM1); - drbuf=calloc(sizeof(int),SPACE2_DIM1*SPACE2_DIM2); - - /* Open the file */ - fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); - - /* Open the dataset */ - dset1=H5Dopen(fid1,"/Dataset1"); - - /* Read selection from disk */ - ret=H5Dread(dset1,H5T_STD_REF_DSETREG,H5S_ALL,H5S_ALL,H5P_DEFAULT,rbuf); - - /* Try to open objects */ - dset2 = H5Rdereference(dset1,H5R_DATASET_REGION,&rbuf[0]); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset2); - - ret=H5Sget_simple_extent_npoints(sid1); - printf(" Number of elements in the dataset is : %d\n",ret); - - /* Read from disk */ - ret=H5Dread(dset2,H5T_NATIVE_INT,H5S_ALL,H5S_ALL,H5P_DEFAULT,drbuf); - - for(i=0; i -#include - -#define FILE2 "trefer2.h5" -#define SPACE1_NAME "Space1" -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -/* Dataset with fixed dimensions */ -#define SPACE2_NAME "Space2" -#define SPACE2_RANK 2 -#define SPACE2_DIM1 10 -#define SPACE2_DIM2 10 - -/* Element selection information */ -#define POINT1_NPOINTS 10 - -int -main(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dset1, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t sid1, /* Dataspace ID #1 */ - sid2; /* Dataspace ID #2 */ - hsize_t dims1[] = {SPACE1_DIM1}, - dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ - hsize_t coord1[POINT1_NPOINTS][SPACE2_RANK]; - /* Coordinates for point selection */ - hdset_reg_ref_t *wbuf; /* buffer to write to disk */ - int *dwbuf; /* Buffer for writing numeric data to disk */ - int i; /* counting variables */ - herr_t ret; /* Generic return value */ - - - /* Allocate write & read buffers */ - wbuf=calloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); - dwbuf=malloc(sizeof(int)*SPACE2_DIM1*SPACE2_DIM2); - - /* Create file */ - fid1 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for datasets */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - - /* Create a dataset */ - dset2=H5Dcreate(fid1,"Dataset2",H5T_STD_U8LE,sid2,H5P_DEFAULT); - - for(i=0; i - -#define FILE "refere.h5" - -int -main(void) { - hid_t fid; /* File, datasets, datatypes and */ - hid_t did_a, sid_a; /* dataspaces identifiers for three */ - hid_t did_b, tid_b, sid_b; /* datasets. 
*/ - hid_t did_r, tid_r, sid_r; - herr_t status; - - hobj_ref_t *wbuf; /* buffer to write to disk */ - hobj_ref_t *rbuf; /* buffer to read from disk */ - - - hsize_t dim_r[1]; - hsize_t dim_a[1]; - hsize_t dim_b[2]; - - herr_t ret; /* return values */ - - /* - * Create a file using default properties. - */ - fid = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* - * Create dataset "A" in the file. - */ - dim_a[0] = 5; - sid_a = H5Screate_simple(1, dim_a, NULL); - did_a = H5Dcreate(fid, "A", H5T_NATIVE_INT, sid_a, H5P_DEFAULT); - - /* - * Create dataset "B" in the file. - */ - dim_b[0] = 2; - dim_b[1] = 6; - sid_b = H5Screate_simple(2, dim_b, NULL); - did_b = H5Dcreate(fid, "B", H5T_NATIVE_FLOAT, sid_b, H5P_DEFAULT); - - /* - * Create dataset "R" to store references to the datasets "A" and "B". - */ - dim_r[0] = 2; - sid_r = H5Screate_simple(1, dim_r, NULL); - tid_r = H5Tcopy(H5T_STD_REF_OBJ); - did_r = H5Dcreate(fid, "R", tid_r, sid_r, H5P_DEFAULT ); - - /* - * Allocate write and read buffers. - */ - wbuf = malloc(sizeof(hobj_ref_t)*2); - rbuf = malloc(sizeof(hobj_ref_t)*2); - - /* - * Create references to the datasets "A" and "B" - * and store them in the wbuf. - */ - H5Rcreate(&wbuf[0], fid, "A", H5R_OBJECT, -1); - H5Rcreate(&wbuf[1], fid, "B", H5R_OBJECT, -1); - - /* - * Write dataset R using default transfer properties. - */ - status = H5Dwrite(did_r, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, - H5P_DEFAULT, wbuf); - - /* - * Close all objects. - */ - H5Sclose(sid_a); - H5Dclose(did_a); - - H5Sclose(sid_b); - H5Dclose(did_b); - - H5Tclose(tid_r); - H5Sclose(sid_r); - H5Dclose(did_r); - - H5Fclose(fid); - - /* - * Reopen the file. - */ - fid = H5Fopen(FILE, H5F_ACC_RDWR, H5P_DEFAULT); - - /* - * Open and read dataset "R". - */ - did_r = H5Dopen(fid, "R"); - status = H5Dread(did_r, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, - H5P_DEFAULT, rbuf); - - /* - * Open dataset A using reference to it. - */ - did_a = H5Rdereference(did_r, H5R_OBJECT, &rbuf[0]); - - /* - * Get rank of the dataset "A" - */ - - printf("\n"); - sid_a = H5Dget_space(did_a); - ret = H5Sget_simple_extent_ndims(sid_a); - - if(ret == 1) printf("Rank of A is %d.\n", ret); - printf("\n"); - - /* - * Get datatype of the dataset "B" - */ - did_b = H5Rdereference(did_r, H5R_OBJECT, &rbuf[1]); - tid_b = H5Dget_type(did_b); - if(H5Tequal(tid_b, H5T_NATIVE_FLOAT)) - printf("Datatype of B is H5T_NATIVE_FLOAT.\n"); - printf("\n"); - - /* - * Close all objects. - */ - H5Dclose(did_a); - H5Sclose(sid_a); - H5Dclose(did_b); - H5Tclose(tid_b); - H5Fclose(fid); - - return 0; - - } - - - diff --git a/doc/html/Tutor/examples/hyperslab.f90 b/doc/html/Tutor/examples/hyperslab.f90 deleted file mode 100644 index e49f18b..0000000 --- a/doc/html/Tutor/examples/hyperslab.f90 +++ /dev/null @@ -1,199 +0,0 @@ -! -! This example shows how to write and read a hyperslab. -! - - PROGRAM SELECTEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=7), PARAMETER :: filename = "sdsf.h5" ! File name - CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" ! Dataset name - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: dset_id ! Dataset identifier - INTEGER(HID_T) :: dataspace ! Dataspace identifier - INTEGER(HID_T) :: memspace ! memspace identifier - - INTEGER(HSIZE_T), DIMENSION(3) :: dimsm = (/7,7,3/) ! Dataset dimensions - ! in memory - INTEGER(HSIZE_T), DIMENSION(2) :: dims_out ! Buffer to read in dataset - ! dimesions - INTEGER(HSIZE_T), DIMENSION(2) :: dimsf = (/5,6/) ! 
Dataset dimensions. - - INTEGER(HSIZE_T), DIMENSION(2) :: count = (/3,4/) - ! Size of the hyperslab in the file - INTEGER(HSIZE_T), DIMENSION(2) :: offset = (/1,2/) - !hyperslab offset in the file - INTEGER(HSIZE_T), DIMENSION(3) :: count_out = (/3,4,1/) - !Size of the hyperslab in memory - INTEGER(HSIZE_T), DIMENSION(3) :: offset_out = (/3,0,0/) - !hyperslab offset in memory - INTEGER, DIMENSION(5,6) :: data ! Data to write - INTEGER, DIMENSION(7,7,3) :: data_out ! Output buffer - INTEGER :: dsetrank = 2 ! Dataset rank ( in file ) - INTEGER :: memrank = 3 ! Dataset rank ( in memory ) - INTEGER :: rank - INTEGER :: i, j, k - - INTEGER :: error, error_n ! Error flags - - - ! - ! Write data to the HDF5 file. - ! - - ! - ! Data initialization. - ! - do i = 1, 5 - do j = 1, 6 - data(i,j) = (i-1) + (j-1); - end do - end do - ! - ! 0, 1, 2, 3, 4, 5 - ! 1, 2, 3, 4, 5, 6 - ! 2, 3, 4, 5, 6, 7 - ! 3, 4, 5, 6, 7, 8 - ! 4, 5, 6, 7, 8, 9 - ! - - ! - ! Initialize FORTRAN predefined datatypes - ! - CALL h5open_f(error) - - ! - ! Create a new file using default properties. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - - ! - ! Create the data space for the dataset. - ! - CALL h5screate_simple_f(dsetrank, dimsf, dataspace, error) - - ! - ! Create the dataset with default properties. - ! - CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, dataspace, & - dset_id, error) - - ! - ! Write the dataset. - ! - CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, error) - - ! - ! Close the dataspace for the dataset. - ! - CALL h5sclose_f(dataspace, error) - - ! - ! Close the dataset. - ! - CALL h5dclose_f(dset_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! This part of the code reads the hyperslab from the sds.h5 file just - ! created, into a 2-dimensional plane of the 3-dimensional dataset. - ! - - ! - ! Initialize data_out array. - ! - do i = 1, 7 - do j = 1, 7 - do k = 1,3 - data_out(i,j,k) = 0; - end do - end do - end do - - ! - ! Open the file. - ! - CALL h5fopen_f (filename, H5F_ACC_RDONLY_F, file_id, error) - - ! - ! Open the dataset. - ! - CALL h5dopen_f(file_id, dsetname, dset_id, error) - - ! - ! Get dataset's dataspace identifier. - ! - CALL h5dget_space_f(dset_id, dataspace, error) - - ! - ! Select hyperslab in the dataset. - ! - CALL h5sselect_hyperslab_f(dataspace, H5S_SELECT_SET_F, & - offset, count, error) - ! - ! Create memory dataspace. - ! - CALL h5screate_simple_f(memrank, dimsm, memspace, error) - - ! - ! Select hyperslab in memory. - ! - CALL h5sselect_hyperslab_f(memspace, H5S_SELECT_SET_F, & - offset_out, count_out, error) - - ! - ! Read data from hyperslab in the file into the hyperslab in - ! memory and display. - ! - CALL H5Dread_f(dset_id, H5T_NATIVE_INTEGER, data_out, error, & - memspace, dataspace) - - ! - ! Display data_out array - ! - do i = 1, 7 - print *, (data_out(i,j,1), j = 1,7) - end do - - ! 0 0 0 0 0 0 0 - ! 0 0 0 0 0 0 0 - ! 0 0 0 0 0 0 0 - ! 3 4 5 6 0 0 0 - ! 4 5 6 7 0 0 0 - ! 5 6 7 8 0 0 0 - ! 0 0 0 0 0 0 0 - ! - - ! - ! Close the dataspace for the dataset. - ! - CALL h5sclose_f(dataspace, error) - - ! - ! Close the memoryspace. - ! - CALL h5sclose_f(memspace, error) - - ! - ! Close the dataset. - ! - CALL h5dclose_f(dset_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Close FORTRAN predefined datatypes. - ! 
- CALL h5close_f(error) - - END PROGRAM SELECTEXAMPLE diff --git a/doc/html/Tutor/examples/java/Compound.java b/doc/html/Tutor/examples/java/Compound.java deleted file mode 100644 index 219e1c1..0000000 --- a/doc/html/Tutor/examples/java/Compound.java +++ /dev/null @@ -1,540 +0,0 @@ -/****************************************************************** - * Compound.java (for HDF5 tutorial lesson 11) - * - * -- Creating a compound data type - * (a java conversion from compound.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class Compound -{ - public static void main (String []argv) - { - final String FILE = "SDScompound.h5"; - final String DATASETNAME = "ArrayOfStructures"; - final int LENGTH = 10; - final int RANK = 1; - - /* First structure and dataset */ - /* an array of LENGTH 'complex' numbers */ - byte[] data1 = new byte[LENGTH * 16]; - - int[] AR = new int[1]; - float[] BR = new float[1]; - double[] CR = new double[1]; - - byte [] ARec = new byte[4]; - byte [] BRec = new byte[4]; - byte [] CRec = new byte[8]; - - int s1_tid; /* File datatype identifier */ - - /* Second structure (subset of s1_t) and dataset*/ - byte[] data2 = new byte[LENGTH * 12]; - int s2_tid; /* Memory datatype handle */ - - /* Third "structure" ( will be used to read float field of s1) */ - int s3_tid; /* Memory datatype handle */ - float[] s3 = new float[LENGTH]; - - int i; - int file, dataset, space; /* Handles */ - int status; - long[] dim = new long[1]; /* Dataspace dimensions */ - dim[0] = LENGTH; - - /* - * Initialize the data - */ - for (i = 0; i < LENGTH; i++) - { - AR[0] = (int) i; - BR[0] = (float) i * i; - CR[0] = (double) 1. / (i + 1); - - ARec = HDFNativeData.intToByte (0, 1, AR); - BRec = HDFNativeData.floatToByte (0, 1, BR); - CRec = HDFNativeData.doubleToByte (0, 1, CR); - - System.arraycopy (ARec, 0, data1, (i * 16), 4); - System.arraycopy (BRec, 0, data1, (i * 16) + 4, 4); - System.arraycopy (CRec, 0, data1, (i * 16) + 8, 8); - } - - /* - * Create the data space. - */ - space = H5Screate_simple_wrap (RANK, dim, null); - - /* - * Create the file. - */ - file = H5Fcreate_wrap (FILE, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - - /* - * Create the memory data type. - */ - s1_tid = H5Tcreate_wrap (HDF5Constants.H5T_COMPOUND, 16); - H5Tinsert_wrap (s1_tid, "a_name", 0, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT)); - H5Tinsert_wrap (s1_tid, "b_name", 4, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_FLOAT)); - H5Tinsert_wrap (s1_tid, "c_name", 8, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_DOUBLE)); - - /* - * Create the dataset. - */ - dataset = H5Dcreate_wrap (file, DATASETNAME, s1_tid, - space, HDF5Constants.H5P_DEFAULT); - - /* - * Wtite data to the dataset; - */ - status = H5Dwrite_wrap (dataset, s1_tid, - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, data1); - - /* - * Release resources - */ - H5Tclose_wrap (s1_tid); - H5Sclose_wrap (space); - H5Dclose_wrap (dataset); - H5Fclose_wrap (file); - - /* - * Open the file and the dataset. 
- */ - file = H5Fopen_wrap (FILE, HDF5Constants.H5F_ACC_RDONLY, - HDF5Constants.H5P_DEFAULT); - - dataset = H5Dopen_wrap (file, DATASETNAME); - - /* - * Create a data type for s2 - */ - s2_tid = H5Tcreate_wrap (HDF5Constants.H5T_COMPOUND, 12); - H5Tinsert_wrap (s2_tid, "c_name", 0, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_DOUBLE)); - H5Tinsert_wrap (s2_tid, "a_name", 8, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT)); - - /* - * Read two fields c and a from s1 dataset. Fields in the file - * are found by their names "c_name" and "a_name". - */ - status = H5Dread_wrap (dataset, s2_tid, HDF5Constants.H5S_ALL, - HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, data2); - - /* - * Display the fields. Convert from bytes into numbers. - */ - System.out.println ("\nField c : "); - for( i = 0; i < LENGTH; i++) { - System.arraycopy (data2, (i*12), CRec, 0, 8); - CR = HDFNativeData.byteToDouble(0, 1, CRec); - System.out.print (CR[0]+" "); - } - System.out.println (); - - System.out.println("\nField a :"); - for( i = 0; i < LENGTH; i++) { - System.arraycopy (data2, (i*12)+8, ARec, 0, 4); - AR = HDFNativeData.byteToInt(0, 1, ARec); - System.out.print (AR[0]+" "); - } - System.out.println (); - - /* - * Create a data type for s3. - */ - s3_tid = H5Tcreate_wrap (HDF5Constants.H5T_COMPOUND, 4); - - status = - H5Tinsert_wrap (s3_tid, "b_name", 0, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_FLOAT)); - - /* - * Read field b from s1 dataset. Field in the file is found by its name. - */ - status = H5Dread_wrap (dataset, s3_tid, HDF5Constants.H5S_ALL, - HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, s3); - - /* - * Display the field. Data is read directly into array of 'float'. - */ - System.out.println (); - System.out.println ("Field b :"); - for( i = 0; i < LENGTH; i++) { - System.out.print (s3[i]+" "); - } - System.out.println (); - - /* - * Release resources - */ - H5Tclose_wrap (s2_tid); - H5Tclose_wrap (s3_tid); - H5Dclose_wrap (dataset); - H5Fclose_wrap (file); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for adding another member to the compound - // datatype datatype_id. - public static int H5Tinsert_wrap (int type_id, String name, - long offset, int field_id) - { - int status = -1; - try - { - // Adding another member to the compound datatype datatype_id. - status = H5.H5Tinsert (type_id, name, offset, field_id); - - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Tinsert_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Tinsert_wrap() with HDF5Exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for creating the memory data type. - public static int H5Tcreate_wrap (int dclass, int size) - { - int datatype_id = -1; // memory data type identifier - try - { - // Create the memory data type. 
- datatype_id = H5.H5Tcreate (dclass, size); - - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Tcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Tcreate_wrap() with other Exception: " - + e.getMessage()); - } - return datatype_id; - } - - - // Help function for opening an existing file - public static int H5Fopen_wrap (String name, int flags, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fopen (name, flags, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Fopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Fopen_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for opening an existing dataset - public static int H5Dopen_wrap (int loc_id, String name) - { - int dataset_id = -1; // dataset identifier - - try - { - // Opening an existing dataset - dataset_id = H5.H5Dopen (loc_id, name); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Dopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Dopen_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for creating a new simple dataspace and opening it - // for access - public static int H5Screate_simple_wrap (int rank, long dims[], - long maxdims[]) - { - int dataspace_id = -1; // dataspace identifier - - try - { - // Create the data space for the dataset. - dataspace_id = H5.H5Screate_simple (rank, dims, maxdims); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Screate_simple_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Screate_simple_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for creating a dataset - public static int H5Dcreate_wrap (int loc_id, String name, int type_id, - int space_id, int create_plist_id) - { - int dataset_id = -1; // dataset identifier - - try - { - // Create the dataset - dataset_id = H5.H5Dcreate (loc_id, name, type_id, space_id, - create_plist_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Dcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Dcreate_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for writing the dataset - public static int H5Dwrite_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object buf) - { - int status = -1; - - try - { - // Write the dataset. 
- status = H5.H5Dwrite (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, buf); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Dwrite_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Dwrite_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for reading the dataset - public static int H5Dread_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object obj) - { - int status = -1; - - try - { - // Read the dataset. - status = H5.H5Dread (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, obj); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Dread_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Dread_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - - // Help function for terminating access to the data space. - public static int H5Sclose_wrap (int dataspace_id) - { - int status = -1; - - try - { - // Terminate access to the data space. - status = H5.H5Sclose (dataspace_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Sclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Sclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for releasing a datatype. - public static int H5Tclose_wrap (int type_id) - { - int status = -1; - - try - { - // Releasing a datatype. - status = H5.H5Tclose (type_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Tclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Tclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for ending access to the dataset and releasing - // resources used by it. - public static int H5Dclose_wrap (int dataset_id) - { - int status = -1; - - try - { - // End access to the dataset and release resources used by it. - status = H5.H5Dclose (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Dclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Dclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. - status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Compound.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Compound.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/Copy.java b/doc/html/Tutor/examples/java/Copy.java deleted file mode 100644 index f174210..0000000 --- a/doc/html/Tutor/examples/java/Copy.java +++ /dev/null @@ -1,541 +0,0 @@ -/****************************************************************** - * Copy.java (for HDF5 tutorial lesson 13) - * - * -- Showing how to use the H5SCOPY function. 
- * (a java conversion from h5_copy.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class Copy -{ - public static void main (String []argv) - { - final String FILE1 = "copy1.h5"; - final String FILE2 = "copy2.h5"; - - final int RANK = 2; - final int DIM1 = 3; - final int DIM2 = 4; - final int NUMP = 2; - - int file1, file2, dataset1, dataset2; - int mid1, mid2, fid1, fid2; - long[] fdim = new long[2]; - fdim[0] = DIM1; - fdim[1] = DIM2; - long[] mdim = new long[2]; - fdim[0] = DIM1; - fdim[1] = DIM2; - - long[] start = new long[2]; - long[] stride = new long[2]; - long[] count = new long[2]; - long[] block = new long[2]; - - int[][] buf1 = new int[DIM1][DIM2]; - int[][] buf2 = new int[DIM1][DIM2]; - int[][] bufnew = new int[DIM1][DIM2]; - - int[] val = new int[2]; - val[0] = 53; - val[1] = 59; - - long[] marray = {2}; - long[][] coord = new long[NUMP][RANK]; - int ret; - int i, j; - - -/***********************************************************************/ -/* */ -/* Create two files containing identical datasets. Write 0's to one */ -/* and 1's to the other. */ -/* */ -/***********************************************************************/ - - for ( i = 0; i < DIM1; i++ ) - for ( j = 0; j < DIM2; j++ ) - buf1[i][j] = 0; - - for ( i = 0; i < DIM1; i++ ) - for ( j = 0; j < DIM2; j++ ) - buf2[i][j] = 1; - - file1 = H5Fcreate_wrap (FILE1, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - file2 = H5Fcreate_wrap (FILE2, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - - fid1 = H5Screate_simple_wrap (RANK, fdim, null); - fid2 = H5Screate_simple_wrap (RANK, fdim, null); - - dataset1 = H5Dcreate_wrap - (file1, "Copy1", H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), fid1, - HDF5Constants.H5P_DEFAULT); - - dataset2 = H5Dcreate_wrap - (file2, "Copy2", H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), fid2, - HDF5Constants.H5P_DEFAULT); - - - ret = H5Dwrite_wrap (dataset1, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, buf1); - - ret = H5Dwrite_wrap (dataset2, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, buf2); - - ret = H5Dclose_wrap (dataset1); - ret = H5Dclose_wrap (dataset2); - - ret = H5Sclose_wrap (fid1); - ret = H5Sclose_wrap (fid2); - - ret = H5Fclose_wrap (file1); - ret = H5Fclose_wrap (file2); - - -/***********************************************************************/ -/* */ -/* Open the two files. Select two points in one file, write values to */ -/* those point locations, then do H5Scopy and write the values to the */ -/* other file. Close files. 
*/ -/* */ -/***********************************************************************/ - - file1 = H5Fopen_wrap (FILE1, HDF5Constants.H5F_ACC_RDWR, - HDF5Constants.H5P_DEFAULT); - - file2 = H5Fopen_wrap (FILE2, HDF5Constants.H5F_ACC_RDWR, - HDF5Constants.H5P_DEFAULT); - - dataset1 = H5Dopen_wrap (file1, "Copy1"); - dataset2 = H5Dopen_wrap (file2, "Copy2"); - - fid1 = H5Dget_space_wrap (dataset1); - mid1 = H5Screate_simple_wrap (1, marray, null); - - coord[0][0] = 0; coord[0][1] = 3; - coord[1][0] = 0; coord[1][1] = 1; - - ret = H5Sselect_elements_wrap (fid1, HDF5Constants.H5S_SELECT_SET, - NUMP, coord); - - ret = H5Dwrite_wrap (dataset1, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - mid1, fid1, HDF5Constants.H5P_DEFAULT, val); - - fid2 = H5Scopy_wrap (fid1); - - ret = H5Dwrite_wrap (dataset2, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - mid1, fid2, HDF5Constants.H5P_DEFAULT, val); - - ret = H5Dclose_wrap (dataset1); - ret = H5Dclose_wrap (dataset2); - ret = H5Sclose_wrap (fid1); - ret = H5Sclose_wrap (fid2); - ret = H5Fclose_wrap (file1); - ret = H5Fclose_wrap (file2); - ret = H5Sclose_wrap (mid1); - - -/***********************************************************************/ -/* */ -/* Open both files and print the contents of the datasets. */ -/* */ -/***********************************************************************/ - - file1 = H5Fopen_wrap (FILE1, HDF5Constants.H5F_ACC_RDWR, - HDF5Constants.H5P_DEFAULT); - file2 = H5Fopen_wrap (FILE2, HDF5Constants.H5F_ACC_RDWR, - HDF5Constants.H5P_DEFAULT); - dataset1 = H5Dopen_wrap (file1, "Copy1"); - dataset2 = H5Dopen_wrap (file2, "Copy2"); - - ret = H5Dread_wrap (dataset1, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, bufnew); - - System.out.println ("\nDataset 'Copy1' in file 'copy1.h5' contains: "); - - for (i = 0;i < DIM1; i++) - { - for (j = 0;j < DIM2; j++) - System.out.print (bufnew[i][j]); - System.out.println (); - } - - System.out.println ("\nDataset 'Copy2' in file 'copy2.h5' contains: "); - - ret = H5Dread_wrap (dataset2, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, bufnew); - - for (i = 0;i < DIM1; i++) - { - for (j = 0;j < DIM2; j++) - System.out.print (bufnew[i][j]); - System.out.println (); - } - - ret = H5Dclose_wrap (dataset1); - ret = H5Dclose_wrap (dataset2); - ret = H5Fclose_wrap (file1); - ret = H5Fclose_wrap (file2); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for opening an existing file - public static int H5Fopen_wrap (String name, int flags, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. 
- file_id = H5.H5Fopen (name, flags, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Fopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Fopen_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for opening an existing dataset - public static int H5Dopen_wrap (int loc_id, String name) - { - int dataset_id = -1; // dataset identifier - - try - { - // Opening an existing dataset - dataset_id = H5.H5Dopen (loc_id, name); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Dopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Dopen_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for creating a new simple dataspace and opening it - // for access - public static int H5Screate_simple_wrap (int rank, long dims[], - long maxdims[]) - { - int dataspace_id = -1; // dataspace identifier - - try - { - // Create the data space for the dataset. - dataspace_id = H5.H5Screate_simple (rank, dims, maxdims); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Screate_simple_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Screate_simple_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for getting an identifier for a copy of - // the dataspace for a dataset - public static int H5Dget_space_wrap (int dataset_id) - { - int dataspace_id = -1; - - try - { - // Returning an identifier for a copy of the dataspace for a dataset - dataspace_id = H5.H5Dget_space (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Dget_space_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Dget_space_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for selecting array elements to be included in - // the selection for the space_id dataspace. - public static int H5Sselect_elements_wrap (int space_id, int op, - int num_elements, - long coord2D[][]) - { - int status = -1; - - try - { - status = H5.H5Sselect_elements (space_id, op, num_elements, - coord2D); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Sselect_elements_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Sselect_elements_wrap() with other Exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for creating a new dataspace which is an exact - // copy of the dataspace identified by space_id. 
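-   // Note: H5Scopy duplicates both the extent and the current element
-   // selection of the source dataspace, which is why the two points
-   // selected on fid1 are also the ones written through fid2.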
- public static int H5Scopy_wrap (int space_id) - { - int dataspace_id = -1; - - try - { - dataspace_id = H5.H5Scopy(space_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println ("Copy.H5Scopy_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println ("Copy.H5Scopy_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for creating a dataset - public static int H5Dcreate_wrap (int loc_id, String name, int type_id, - int space_id, int create_plist_id) - { - int dataset_id = -1; // dataset identifier - - try - { - // Create the dataset - dataset_id = H5.H5Dcreate (loc_id, name, type_id, space_id, - create_plist_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Dcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Dcreate_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for writing the dataset - public static int H5Dwrite_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object buf) - { - int status = -1; - - try - { - // Write the dataset. - status = H5.H5Dwrite (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, buf); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Dwrite_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Dwrite_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for reading the dataset - public static int H5Dread_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object obj) - { - int status = -1; - - try - { - // Read the dataset. - status = H5.H5Dread (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, obj); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Dread_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Dread_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the data space. - public static int H5Sclose_wrap (int dataspace_id) - { - int status = -1; - - try - { - // Terminate access to the data space. - status = H5.H5Sclose (dataspace_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Sclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Sclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for ending access to the dataset and releasing - // resources used by it. - public static int H5Dclose_wrap (int dataset_id) - { - int status = -1; - - try - { - // End access to the dataset and release resources used by it. - status = H5.H5Dclose (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Dclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Dclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. 
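-         // Note: with the library's default close degree the file typically
-         // remains open internally until every identifier that refers to it
-         // has been closed.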
- status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("Copy.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("Copy.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/CreateAttribute.java b/doc/html/Tutor/examples/java/CreateAttribute.java deleted file mode 100644 index c926422..0000000 --- a/doc/html/Tutor/examples/java/CreateAttribute.java +++ /dev/null @@ -1,302 +0,0 @@ -/****************************************************************** - * CreateAttribute.java (for HDF5 tutorial lesson 7) - * - * -- Creating and Writing a dataset attribute - * (a java conversion from h5_crtatt.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class CreateAttribute -{ - public static void main(String []argv) - { - final String FILE = "dset.h5"; - int file_id = -1; // file identifier - int dataset_id = -1; // dataset identifier - int attribute_id = -1; - int dataspace_id = -1; // dataspace identifier - long[] dims = new long[1]; - int[] attr_data = new int[2]; - int status = -1; - - // Initialize the attribute data. - attr_data[0] = 100; - attr_data[1] = 200; - - // Open an existing file. - file_id = H5Fopen_wrap (FILE, HDF5Constants.H5F_ACC_RDWR, - HDF5Constants.H5P_DEFAULT); - - // Open an existing dataset. - dataset_id = H5Dopen_wrap (file_id, "/dset"); - - // Create the data space for the attribute. - dims[0] = 2; - dataspace_id = H5Screate_simple_wrap (1, dims, null); - - // Create a dataset attribute. - attribute_id = H5Acreate_wrap - (dataset_id, "attr", - H5.J2C (HDF5CDataTypes.JH5T_STD_I32BE), - dataspace_id, HDF5Constants.H5P_DEFAULT); - - // Write the attribute data. - status = H5Awrite_wrap - (attribute_id, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - attr_data); - - // Close the attribute. - status = H5Aclose_wrap (attribute_id); - - // Close the dataspace. - status = H5Sclose_wrap (dataspace_id); - - // Close to the dataset. - status = H5Dclose_wrap (dataset_id); - - // Close the file. - status = H5Fclose_wrap (file_id); - } - - - // Help function for opening an existing file - public static int H5Fopen_wrap (String name, int flags, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fopen (name, flags, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Fopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Fopen_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for opening an existing dataset - public static int H5Dopen_wrap (int loc_id, String name) - { - int dataset_id = -1; // dataset identifier - - try - { - // Opening an existing dataset - dataset_id = H5.H5Dopen (loc_id, name); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Dopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Dopen_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Create the data space for the attribute. 
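-   // Note: the attribute above uses a rank-1 dataspace of two elements
-   // (dims[0] = 2), matching the two integers in attr_data.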
- public static int H5Screate_simple_wrap (int rank, long dims[], - long maxdims[]) - { - int dataspace_id = -1; // dataspace identifier - - try - { - // Create the data space for the dataset. - dataspace_id = H5.H5Screate_simple (rank, dims, maxdims); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Screate_simple_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Screate_simple_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for creating a dataset attribute. - public static int H5Acreate_wrap (int loc_id, String name, int type_id, - int space_id, int create_plist) - { - int attribute_id = -1; // attribute identifier - - try - { - // Create the dataset - attribute_id = H5.H5Acreate (loc_id, name, type_id, space_id, - create_plist); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Acreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Acreate_wrap() with other Exception: " - + e.getMessage()); - } - return attribute_id; - } - - - // Help function for writing the attribute data. - public static int H5Awrite_wrap (int attr_id, int mem_type_id, - Object buf) - { - int status = -1; - - try - { - // Write the attribute data. - status = H5.H5Awrite (attr_id, mem_type_id, buf); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Awrite_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Awrite_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for closing the attribute - public static int H5Aclose_wrap (int attribute_id) - { - int status = -1; - - try - { - // Close the dataset - status = H5.H5Aclose (attribute_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Aclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Aclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for closing the dataset - public static int H5Dclose_wrap (int dataset_id) - { - int status = -1; - - try - { - // Close the dataset - status = H5.H5Dclose (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Dclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Dclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for closing the dataspace - public static int H5Sclose_wrap (int dataspace_id) - { - int status = -1; - - try - { - // Terminate access to the data space. - status = H5.H5Sclose (dataspace_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Sclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Sclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. 
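-         // Note: each wrapper in these examples returns -1 when the
-         // underlying call throws, so callers can treat a negative
-         // status as failure.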
- status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateAttribute.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateAttribute.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/CreateDataset.java b/doc/html/Tutor/examples/java/CreateDataset.java deleted file mode 100644 index 05f3f6b..0000000 --- a/doc/html/Tutor/examples/java/CreateDataset.java +++ /dev/null @@ -1,210 +0,0 @@ -/****************************************************************** - * CreateDataset.java (for HDF5 tutorial lesson 5) - * - * -- Creating a HDF5 Dataset - * (a java conversion from h5_crtdat.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class CreateDataset -{ - public static void main(String []argv) - { - final String FILE = "dset.h5"; - int file_id = -1; // file identifier - int dataset_id = -1; // dataset identifier - int dataspace_id = -1; // dataspace identifier - long[] dims = new long[2]; - int status = -1; - - // Create a new file using default properties. - file_id = H5Fcreate_wrap (FILE, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - - // Create the data space for the dataset. - dims[0] = 4; - dims[1] = 6; - dataspace_id = H5Screate_simple_wrap (2, dims, null); - - // Create the dataset. - dataset_id = - H5Dcreate_wrap (file_id, "/dset", - H5.J2C (HDF5CDataTypes.JH5T_STD_I32BE), - dataspace_id, HDF5Constants.H5P_DEFAULT); - - // End access to the dataset and release resources used by it. - status = H5Dclose_wrap (dataset_id); - - // Terminate access to the data space. - status = H5Sclose_wrap (dataspace_id); - - // Close the file. - status = H5Fclose_wrap (file_id); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateDataset.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateDataset.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for creating a new simple dataspace and opening it - // for access - public static int H5Screate_simple_wrap (int rank, long dims[], - long maxdims[]) - { - int dataspace_id = -1; // dataspace identifier - - try - { - // Create the data space for the dataset. 
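-         // Note: rank is the number of dimensions and dims the current
-         // sizes; passing null for maxdims makes the maximum sizes equal
-         // the current sizes, so the dataset cannot be extended later.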
- dataspace_id = H5.H5Screate_simple (rank, dims, maxdims); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateDataset.H5Screate_simple_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateDataset.H5Screate_simple_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for creating a dataset - public static int H5Dcreate_wrap (int loc_id, String name, int type_id, - int space_id, int create_plist_id) - { - int dataset_id = -1; // dataset identifier - - try - { - // Create the dataset - dataset_id = H5.H5Dcreate (loc_id, name, type_id, space_id, - create_plist_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateDataset.H5Dcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateDataset.H5Dcreate_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for ending access to the dataset and releasing - // resources used by it. - public static int H5Dclose_wrap (int dataset_id) - { - int status = -1; - - try - { - // End access to the dataset and release resources used by it. - status = H5.H5Dclose (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateDataset.H5Dclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateDataset.H5Dclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the data space. - public static int H5Sclose_wrap (int dataspace_id) - { - int status = -1; - - try - { - // Terminate access to the data space. - status = H5.H5Sclose (dataspace_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateDataset.H5Sclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateDataset.H5Sclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. 
- status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateDataset.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateDataset.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} - diff --git a/doc/html/Tutor/examples/java/CreateFile.java b/doc/html/Tutor/examples/java/CreateFile.java deleted file mode 100644 index 550b263..0000000 --- a/doc/html/Tutor/examples/java/CreateFile.java +++ /dev/null @@ -1,83 +0,0 @@ -/****************************************************************** - * CreateFile.java (for HDF5 tutorial lesson 4) - * - * -- Creating a HDF5 file - * (a java conversion from h5_crtfile.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class CreateFile -{ - public static void main(String []argv) - { - final String FILE = "file.h5"; - int file_id = -1; // file identifier - int status = -1; - - file_id = H5Fcreate_wrap (FILE, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - status = H5Fclose_wrap (file_id); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateFile.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateFile.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - - System.out.println ("\nThe file name is: " + name); - System.out.println ("The file ID is: " + file_id); - - return file_id; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. - status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateFile.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateFile.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} - - diff --git a/doc/html/Tutor/examples/java/CreateFileInput.java b/doc/html/Tutor/examples/java/CreateFileInput.java deleted file mode 100644 index 0e7fd4d..0000000 --- a/doc/html/Tutor/examples/java/CreateFileInput.java +++ /dev/null @@ -1,118 +0,0 @@ -/****************************************************************** - * CreateFileInput.java (for HDF5 tutorial Lesson 4) - * - * -- Creating a HDF5 file - * (another java conversion from h5_crtfile.c, give user two options: - * one for library path and one for file name, if user chooses - * nothing, then the default file name is used.) 
- * - ******************************************************************/ - -import java.lang.System; -import java.util.*; -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class CreateFileInput -{ - // The run command should be like: - // "./runCreateFileInput -l /usr/lib/hdf5.dll -f ./open.h5" - public static void main(String []argv) - { - int file_id = -1; // file identifier - int status = -1; - String libpath = null; - String filename = null; - - for (int i = 0; i < argv.length; i++) - { - if ("-l".equalsIgnoreCase (argv[i])) - libpath = argv[++i]; - - if ("-f".equalsIgnoreCase (argv[i])) - filename = argv[++i]; - } - - if (libpath != null) - { - Properties pros = System.getProperties (); - pros.put (H5.H5PATH_PROPERTY_KEY, libpath); - - /* - this function call could be used in Java 1.2 - System.setProperty (H5.H5PATH_PROPERTY_KEY, libpath); - */ - } - - if (filename == null) - { - filename = "file.h5"; // if no input file name, use the default name - } - - file_id = H5Fcreate_wrap (filename, - HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - status = H5Fclose_wrap (filename, file_id); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateFileInput.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateFileInput.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - - System.out.println ("\nThe file name is: " + name); - System.out.println ("The file ID is: " + file_id); - - return file_id; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (String name, int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. - status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateFileInput.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateFileInput.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - - return status; - } -} - - diff --git a/doc/html/Tutor/examples/java/CreateGroup.java b/doc/html/Tutor/examples/java/CreateGroup.java deleted file mode 100644 index 48ef4af..0000000 --- a/doc/html/Tutor/examples/java/CreateGroup.java +++ /dev/null @@ -1,139 +0,0 @@ -/****************************************************************** - * CreateGroup.java (for HDF5 tutorial lesson 8) - * - * -- Creating and closing a group - * (a java conversion from h5_crtgrp.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class CreateGroup -{ - public static void main(String []argv) - { - final String FILE = "group.h5"; - int file_id = -1; // file identifier - int group_id = -1; // group identifier - int status = -1; - - // Create a new file using default properties. - file_id = H5Fcreate_wrap (FILE, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - - // Create a group named "/MyGroup" in the file. - group_id = H5Gcreate_wrap (file_id, "/MyGroup", 0); - - // Close the group. 
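-       // Note: group identifiers, like dataset and file identifiers, hold
-       // library resources until they are explicitly closed.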
- status = H5Gclose_wrap (group_id); - - // Close the file. - status = H5Fclose_wrap (file_id); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroup.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroup.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for creating a group named "/MyGroup" in the file. - public static int H5Gcreate_wrap (int loc_id, String name, int size_hint) - { - int group_id = -1; // group identifier - try - { - // Create a group - group_id = H5.H5Gcreate (loc_id, name, size_hint); - - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroup.H5Gcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroup.H5Gcreate_wrap() with other Exception: " - + e.getMessage()); - } - return group_id; - } - - - // Help function for closing the group - public static int H5Gclose_wrap (int group_id) - { - int status = -1; - - try - { - // Close the group - status = H5.H5Gclose (group_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroup.H5Gclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroup.H5Gclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. - status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroup.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroup.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/CreateGroupAR.java b/doc/html/Tutor/examples/java/CreateGroupAR.java deleted file mode 100644 index 672f1d1..0000000 --- a/doc/html/Tutor/examples/java/CreateGroupAR.java +++ /dev/null @@ -1,152 +0,0 @@ -/****************************************************************** - * CreateGroupAR.java (for HDF5 tutorial lesson 9) - * - * -- Creating groups using absolute and relative names. - * (a java conversion from h5_crtgrpar.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class CreateGroupAR -{ - public static void main(String []argv) - { - final String FILE = "groups.h5"; - int file_id = -1; // file identifier - int group1_id = -1; // group identifier - int group2_id = -1; - int group3_id = -1; - - int status = -1; - - // Create a new file using default properties. - file_id = H5Fcreate_wrap (FILE, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - - // Create group "MyGroup" in the root group using absolute name. - group1_id = H5Gcreate_wrap (file_id, "/MyGroup", 0); - - - // Create group "Group_A" in group "MyGroup" using absolute name. 
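-       // Note: an absolute name such as "/MyGroup/Group_A" is resolved from
-       // the root group of the file identifier, while the relative name
-       // "Group_B" below is resolved against group1_id.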
- group2_id = H5Gcreate_wrap (file_id, "/MyGroup/Group_A", 0); - - // Create group "Group_B" in group "MyGroup" using relative name. - group3_id = H5Gcreate_wrap (group1_id, "Group_B", 0); - - // Close groups. - status = H5Gclose_wrap (group1_id); - status = H5Gclose_wrap (group2_id); - status = H5Gclose_wrap (group3_id); - - // Close the file. - status = H5Fclose_wrap (file_id); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupAR.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupAR.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for creating a group named "/MyGroup" in the file. - public static int H5Gcreate_wrap (int loc_id, String name, int size_hint) - { - int group_id = -1; // group identifier - try - { - // Create a group - group_id = H5.H5Gcreate (loc_id, name, size_hint); - - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupAR.H5Gcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupAR.H5Gcreate_wrap() with other Exception: " - + e.getMessage()); - } - return group_id; - } - - - // Help function for closing the group - public static int H5Gclose_wrap (int group_id) - { - int status = -1; - - try - { - // Close the group - status = H5.H5Gclose (group_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupAR.H5Gclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupAR.H5Gclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. 
- status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupAR.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupAR.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/CreateGroupDataset.java b/doc/html/Tutor/examples/java/CreateGroupDataset.java deleted file mode 100644 index f0fbeaa..0000000 --- a/doc/html/Tutor/examples/java/CreateGroupDataset.java +++ /dev/null @@ -1,340 +0,0 @@ -/****************************************************************** - * CreateGroupDataset.java (for HDF5 tutorial lesson 10) - * - * -- Creating a dataset in a particular group - * (a java conversion from h5_crtgrpd.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class CreateGroupDataset -{ - public static void main(String []argv) - { - final String FILE = "groups.h5"; - int file_id = -1; // file identifier - int group_id = -1; // group identifier - int dataset_id; - int dataspace_id; - int status = -1; - - long[] dims = new long[2]; - int[][] dset1_data = new int[3][3]; - int[][] dset2_data = new int[2][10]; - int i = -1, j = -1; - - // Initialize the first dataset. - for (i = 0; i < 3; i++) - for (j = 0; j < 3; j++) - dset1_data[i][j] = j + 1; - - // Initialize the second dataset. - for (i = 0; i < 2; i++) - for (j = 0; j < 10; j++) - dset2_data[i][j] = j + 1; - - // Open an existing file. - file_id = H5Fopen_wrap (FILE, HDF5Constants.H5F_ACC_RDWR, - HDF5Constants.H5P_DEFAULT); - - // Create the data space for the first dataset. - dims[0] = 3; - dims[1] = 3; - dataspace_id = H5Screate_simple_wrap (2, dims, null); - - // Create a dataset in group "MyGroup". - dataset_id = - H5Dcreate_wrap (file_id, "/MyGroup/dset1", - H5.J2C (HDF5CDataTypes.JH5T_STD_I32BE), - dataspace_id, HDF5Constants.H5P_DEFAULT); - - // Write the first dataset. - status = H5Dwrite_wrap - (dataset_id, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, dset1_data); - - // Close the data space for the first dataset. - status = H5Sclose_wrap (dataspace_id); - - // Close the first dataset. - status = H5Dclose_wrap (dataset_id); - - // Open an existing group of the specified file. - group_id = H5Gopen_wrap (file_id, "/MyGroup/Group_A"); - - // Create the data space for the second dataset. - dims[0] = 2; - dims[1] = 10; - dataspace_id = H5Screate_simple_wrap (2, dims, null); - - // Create the second dataset in group "Group_A". - dataset_id = - H5Dcreate_wrap (group_id, "dset2", - H5.J2C (HDF5CDataTypes.JH5T_STD_I32BE), - dataspace_id, HDF5Constants.H5P_DEFAULT); - - // Write the second dataset. - status = H5Dwrite_wrap - (dataset_id, - H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, dset2_data); - - // Close the data space for the second dataset. - status = H5Sclose_wrap (dataspace_id); - - // Close the second dataset - status = H5Dclose_wrap (dataset_id); - - // Close the group. - status = H5Gclose_wrap (group_id); - - // Close the file. 
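-       // Note: both dset1 and dset2 were written with H5S_ALL for the memory
-       // and file dataspaces, i.e. the entire dataset, so no explicit
-       // selection was needed.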
- status = H5Fclose_wrap (file_id); - } - - - // Help function for opening an existing file - public static int H5Fopen_wrap (String name, int flags, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fopen (name, flags, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Fopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Fopen_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for creating a new simple dataspace and opening it - // for access - public static int H5Screate_simple_wrap (int rank, long dims[], - long maxdims[]) - { - int dataspace_id = -1; // dataspace identifier - - try - { - // Create the data space for the dataset. - dataspace_id = H5.H5Screate_simple (rank, dims, maxdims); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Screate_simple_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Screate_simple_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for creating a dataset - public static int H5Dcreate_wrap (int loc_id, String name, int type_id, - int space_id, int create_plist_id) - { - int dataset_id = -1; // dataset identifier - - try - { - // Create the dataset - dataset_id = H5.H5Dcreate (loc_id, name, type_id, space_id, - create_plist_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Dcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Dcreate_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for writing the dataset - public static int H5Dwrite_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object buf) - { - int status = -1; - - try - { - // Write the dataset. - status = H5.H5Dwrite (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, buf); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Dwrite_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Dwrite_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the data space. - public static int H5Sclose_wrap (int dataspace_id) - { - int status = -1; - - try - { - // Terminate access to the data space. - status = H5.H5Sclose (dataspace_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Sclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Sclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for ending access to the dataset and releasing - // resources used by it. - public static int H5Dclose_wrap (int dataset_id) - { - int status = -1; - - try - { - // End access to the dataset and release resources used by it. 
- status = H5.H5Dclose (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Dclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Dclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for opening a group - public static int H5Gopen_wrap (int loc_id, String name) - { - int group_id = -1; // group identifier - try - { - // Create a group - group_id = H5.H5Gopen (loc_id, name); - - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Gopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Gopen_wrap() with other Exception: " - + e.getMessage()); - } - return group_id; - } - - - // Help function for closing the group - public static int H5Gclose_wrap (int group_id) - { - int status = -1; - - try - { - // Close the group - status = H5.H5Gclose (group_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Gclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Gclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. - status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("CreateGroupDataset.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("CreateGroupDataset.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/DatasetRdWt.java b/doc/html/Tutor/examples/java/DatasetRdWt.java deleted file mode 100644 index 4c26d0f..0000000 --- a/doc/html/Tutor/examples/java/DatasetRdWt.java +++ /dev/null @@ -1,213 +0,0 @@ -/****************************************************************** - * DatasetRdWt.java (for HDF5 tutorial lesson 6) - * - * -- Reading and Writing an existing Dataset - * (a java conversion from h5_rdwt.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class DatasetRdWt -{ - public static void main(String []argv) - { - final String FILE = "dset.h5"; - int file_id = -1; // file identifier - int dataset_id = -1; // dataset identifier - int status = -1; - int[][] dset_data = new int[4][6]; - - // Initialize the dataset. - for (int i = 0; i < 4; i++) - for (int j = 0; j < 6; j++) - dset_data[i][j] = i * 6 + j + 1; - - // Open an existing file - file_id = H5Fopen_wrap (FILE, HDF5Constants.H5F_ACC_RDWR, - HDF5Constants.H5P_DEFAULT); - - // Open an existing dataset. - dataset_id = H5Dopen_wrap (file_id, "/dset"); - - // Write the dataset. - status = H5Dwrite_wrap - (dataset_id, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, dset_data); - - status = H5Dread_wrap - (dataset_id, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, dset_data); - - // Close the dataset. - status = H5Dclose_wrap (dataset_id); - - // Close the file. 
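-       // Note: the H5Dread call above reads the values back into the same
-       // dset_data buffer that was just written, so the buffer now holds
-       // the data exactly as stored in the file.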
- status = H5Fclose_wrap (file_id); - } - - - // Help function for opening an existing file - public static int H5Fopen_wrap (String name, int flags, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fopen (name, flags, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("DatasetRdWt.H5Fopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("DatasetRdWt.H5Fopen_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for opening an existing dataset - public static int H5Dopen_wrap (int loc_id, String name) - { - int dataset_id = -1; // dataset identifier - - try - { - // Opening an existing dataset - dataset_id = H5.H5Dopen (loc_id, name); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("DatasetRdWt.H5Dopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("DatasetRdWt.H5Dopen_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for writing the dataset - public static int H5Dwrite_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object buf) - { - int status = -1; - - try - { - // Write the dataset. - status = H5.H5Dwrite (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, buf); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("DatasetRdWt.H5Dwrite_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("DatasetRdWt.H5Dwrite_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for reading the dataset - public static int H5Dread_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object obj) - { - int status = -1; - - try - { - // Read the dataset. - status = H5.H5Dread (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, obj); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("DatasetRdWt.H5Dread_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("DatasetRdWt.H5Dread_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for ending access to the dataset and releasing - // resources used by it. - public static int H5Dclose_wrap (int dataset_id) - { - int status = -1; - - try - { - // End access to the dataset and release resources used by it. - status = H5.H5Dclose (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("DatasetRdWt.H5Dclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("DatasetRdWt.H5Dclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. 
- status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("DatasetRdWt.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("DatasetRdWt.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/Dependencies b/doc/html/Tutor/examples/java/Dependencies deleted file mode 100644 index e69de29..0000000 diff --git a/doc/html/Tutor/examples/java/HyperSlab.java b/doc/html/Tutor/examples/java/HyperSlab.java deleted file mode 100644 index 5f8818d..0000000 --- a/doc/html/Tutor/examples/java/HyperSlab.java +++ /dev/null @@ -1,590 +0,0 @@ -/****************************************************************** - * HyperSlab.java (for HDF5 tutorial lesson 12) - * - * -- Writing and reading a hyperslab - * (a java conversion from h5_hyperslab.c) - * - ******************************************************************/ - -import ncsa.hdf.hdf5lib.*; -import ncsa.hdf.hdf5lib.exceptions.*; - -public class HyperSlab -{ - public static void main (String []argv) - { - final String FILE = "sds.h5"; - final String DATASETNAME = "IntArray"; - final int NX_SUB = 3; /* hyperslab dimensions */ - final int NY_SUB = 4; - final int NX = 7; /* output buffer dimensions */ - final int NY = 7; - final int NZ = 3; - final int RANK = 2; - final int RANK_OUT = 3; - final int X = 5; /* dataset dimensions */ - final int Y = 6; - - long[] dimsf = new long[2]; /* dataset dimensions */ - int[][] data = new int[X][Y]; /* data to write */ - - /* - * Data and output buffer initialization. - */ - int file, dataset; /* handles */ - int dataspace; - int memspace; - long[] dimsm = new long[3]; /* memory space dimensions */ - long[] dims_out = new long[2]; /* dataset dimensions */ - int status; - - int[][][] data_out = new int[NX][NY][NZ]; /* output buffer */ - - long[] count = new long[2]; /* size of the hyperslab in the file */ - long[] offset = new long[2]; /* hyperslab offset in the file */ - long[] count_out = new long[3]; /* size of the hyperslab in memory */ - long[] offset_out = new long[3]; /* hyperslab offset in memory */ - int i, j, k, status_n, rank; - - /********************************************************* - This writes data to the HDF5 file. - *********************************************************/ - - /* - * Data and output buffer initialization. - */ - for (j = 0; j < X; j++) - { - for (i = 0; i < Y; i++) - data[j][i] = i + j; - } - /* - * 0 1 2 3 4 5 - * 1 2 3 4 5 6 - * 2 3 4 5 6 7 - * 3 4 5 6 7 8 - * 4 5 6 7 8 9 - */ - - /* - * Create a new file using H5F_ACC_TRUNC access, - * the default file creation properties, and the default file - * access properties. - */ - file = H5Fcreate_wrap (FILE, HDF5Constants.H5F_ACC_TRUNC, - HDF5Constants.H5P_DEFAULT, - HDF5Constants.H5P_DEFAULT); - - /* - * Describe the size of the array and create the data space for fixed - * size dataset. - */ - dimsf[0] = X; - dimsf[1] = Y; - dataspace = H5Screate_simple_wrap (RANK, dimsf, null); - - /* - * Create a new dataset within the file using defined dataspace and - * default dataset creation properties. - */ - dataset = H5Dcreate_wrap - (file, DATASETNAME, H5.J2C (HDF5CDataTypes.JH5T_STD_I32BE), - dataspace, HDF5Constants.H5P_DEFAULT); - - /* - * Write the data to the dataset using default transfer properties. 
- */ - status = H5Dwrite_wrap - (dataset, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL, - HDF5Constants.H5P_DEFAULT, data); - - /* - * Close/release resources. - */ - H5Sclose_wrap (dataspace); - H5Dclose_wrap (dataset); - H5Fclose_wrap (file); - - /************************************************************* - - This reads the hyperslab from the sds.h5 file just - created, into a 2-dimensional plane of the 3-dimensional - array. - - ************************************************************/ - - for (j = 0; j < NX; j++) - { - for (i = 0; i < NY; i++) - { - for (k = 0; k < NZ ; k++) - data_out[j][i][k] = 0; - } - } - - /* - * Open the file and the dataset. - */ - file = H5Fopen_wrap (FILE, HDF5Constants.H5F_ACC_RDONLY, - HDF5Constants.H5P_DEFAULT); - dataset = H5Dopen_wrap (file, DATASETNAME); - - dataspace = H5Dget_space_wrap (dataset); /* dataspace handle */ - rank = H5Sget_simple_extent_ndims_wrap (dataspace); - status_n = H5Sget_simple_extent_dims_wrap (dataspace, dims_out, null); - - System.out.println ("Rank: " + rank); - System.out.println ("Dimensions: "+ dims_out[0] + " x " + dims_out[1]); - - /* - * Define hyperslab in the dataset. - */ - offset[0] = 1; - offset[1] = 2; - count[0] = NX_SUB; - count[1] = NY_SUB; - status = H5Sselect_hyperslab_wrap (dataspace, - HDF5Constants.H5S_SELECT_SET, - offset, null, count, null); - - /* - * Define the memory dataspace. - */ - dimsm[0] = NX; - dimsm[1] = NY; - dimsm[2] = NZ; - memspace = H5Screate_simple_wrap (RANK_OUT, dimsm, null); - - /* - * Define memory hyperslab. - */ - offset_out[0] = 3; - offset_out[1] = 0; - offset_out[2] = 0; - count_out[0] = NX_SUB; - count_out[1] = NY_SUB; - count_out[2] = 1; - status = H5Sselect_hyperslab_wrap (memspace, - HDF5Constants.H5S_SELECT_SET, - offset_out, null, count_out, null); - - /* - * Read data from hyperslab in the file into the hyperslab in - * memory and display. - */ - status = - H5Dread_wrap (dataset, H5.J2C (HDF5CDataTypes.JH5T_NATIVE_INT), - memspace, dataspace, HDF5Constants.H5P_DEFAULT, - data_out); - - System.out.println ("Data:"); - for (j = 0; j < NX; j++) - { - for (i = 0; i < NY; i++) - System.out.print (data_out[j][i][0]); - System.out.println (); - } - System.out.println (); - - /* - * 0 0 0 0 0 0 0 - * 0 0 0 0 0 0 0 - * 0 0 0 0 0 0 0 - * 3 4 5 6 0 0 0 - * 4 5 6 7 0 0 0 - * 5 6 7 8 0 0 0 - * 0 0 0 0 0 0 0 - */ - - /* - * Close and release resources. - */ - H5Dclose_wrap (dataset); - H5Sclose_wrap (dataspace); - H5Sclose_wrap (memspace); - H5Fclose_wrap (file); - } - - - // Help function for creating a new file - public static int H5Fcreate_wrap (String name, int flags, - int create_id, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. - file_id = H5.H5Fcreate (name, flags, create_id, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Fcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Fcreate_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for opening an existing file - public static int H5Fopen_wrap (String name, int flags, int access_id) - { - int file_id = -1; // file identifier - try - { - // Create a new file using default file properties. 
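-         // Note: H5Fopen opens an existing file; the read-back phase of
-         // this example passes H5F_ACC_RDONLY because it only reads the
-         // hyperslab from sds.h5.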
- file_id = H5.H5Fopen (name, flags, access_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Fopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Fopen_wrap() with other Exception: " - + e.getMessage()); - } - return file_id; - } - - - // Help function for opening an existing dataset - public static int H5Dopen_wrap (int loc_id, String name) - { - int dataset_id = -1; // dataset identifier - - try - { - // Opening an existing dataset - dataset_id = H5.H5Dopen (loc_id, name); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Dopen_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Dopen_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for creating a new simple dataspace and opening it - // for access - public static int H5Screate_simple_wrap (int rank, long dims[], - long maxdims[]) - { - int dataspace_id = -1; // dataspace identifier - - try - { - // Create the data space for the dataset. - dataspace_id = H5.H5Screate_simple (rank, dims, maxdims); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Screate_simple_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Screate_simple_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for getting an identifier for a copy of - // the dataspace for a dataset - public static int H5Dget_space_wrap (int dataset_id) - { - int dataspace_id = -1; - - try - { - // Returning an identifier for a copy of the dataspace for a dataset - dataspace_id = H5.H5Dget_space (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Dget_space_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Dget_space_wrap() with other Exception: " - + e.getMessage()); - } - return dataspace_id; - } - - - // Help function for determining the dimensionality (or rank) of - // a dataspace - public static int H5Sget_simple_extent_ndims_wrap (int space_id) - { - int rank = -1; - - try - { - // Determine the dimensionality (or rank) of a dataspace. - rank = H5.H5Sget_simple_extent_ndims (space_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Sget_simple_extent_ndims_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Sget_simple_extent_ndims_wrap() with other Exception: " - + e.getMessage()); - } - return rank; - } - - - // Help function for returning the size and maximum sizes of each - // dimension of a dataspace through the dims and maxdims parameters. 
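-   // Note: on success H5Sget_simple_extent_dims returns the rank of the
-   // dataspace and fills dims with the current dimension sizes; passing
-   // null for maxdims skips the maximum-size query.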
- public static int H5Sget_simple_extent_dims_wrap (int space_id, - long dims[], - long maxdims[]) - { - int dimension_number = -1; - - try - { - dimension_number = H5.H5Sget_simple_extent_dims (space_id, dims, - maxdims); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Sget_simple_extent_dims_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Sget_simple_extent_dims_wrap() with other Exception: " - + e.getMessage()); - } - return dimension_number; - } - - - // Help function for selecting a hyperslab region to add to the - // current selected region for the dataspace specified by space_id. - public static int H5Sselect_hyperslab_wrap (int space_id, int op, - long start[], long stride[], - long count[], long block[]) - { - int status = -1; - - try - { - status = H5.H5Sselect_hyperslab (space_id, op, start, stride, - count, block); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Sselect_hyperslab_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Sselect_hyperslab_wrap() with other Exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for creating a dataset - public static int H5Dcreate_wrap (int loc_id, String name, int type_id, - int space_id, int create_plist_id) - { - int dataset_id = -1; // dataset identifier - - try - { - // Create the dataset - dataset_id = H5.H5Dcreate (loc_id, name, type_id, space_id, - create_plist_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Dcreate_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Dcreate_wrap() with other Exception: " - + e.getMessage()); - } - return dataset_id; - } - - - // Help function for writing the dataset - public static int H5Dwrite_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object buf) - { - int status = -1; - - try - { - // Write the dataset. - status = H5.H5Dwrite (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, buf); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Dwrite_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Dwrite_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for reading the dataset - public static int H5Dread_wrap (int dataset_id, int mem_type_id, - int mem_space_id, int file_space_id, - int xfer_plist_id, Object obj) - { - int status = -1; - - try - { - // Read the dataset. - status = H5.H5Dread (dataset_id, mem_type_id, mem_space_id, - file_space_id, xfer_plist_id, obj); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Dread_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Dread_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the data space. - public static int H5Sclose_wrap (int dataspace_id) - { - int status = -1; - - try - { - // Terminate access to the data space. 
- status = H5.H5Sclose (dataspace_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Sclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Sclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for ending access to the dataset and releasing - // resources used by it. - public static int H5Dclose_wrap (int dataset_id) - { - int status = -1; - - try - { - // End access to the dataset and release resources used by it. - status = H5.H5Dclose (dataset_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Dclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Dclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } - - - // Help function for terminating access to the file. - public static int H5Fclose_wrap (int file_id) - { - int status = -1; - - try - { - // Terminate access to the file. - status = H5.H5Fclose (file_id); - } - catch (HDF5Exception hdf5e) - { - System.out.println - ("HyperSlab.H5Fclose_wrap() with HDF5Exception: " - + hdf5e.getMessage()); - } - catch (Exception e) - { - System.out.println - ("HyperSlab.H5Fclose_wrap() with other exception: " - + e.getMessage()); - } - return status; - } -} diff --git a/doc/html/Tutor/examples/java/Makefile b/doc/html/Tutor/examples/java/Makefile deleted file mode 100644 index a70ab0b..0000000 --- a/doc/html/Tutor/examples/java/Makefile +++ /dev/null @@ -1,92 +0,0 @@ -# Generated automatically from Makefile.in by configure. -# /*======================================================================= -# UNIVERSITY OF ILLINOIS (UI), NATIONAL CENTER FOR SUPERCOMPUTING -# APPLICATIONS (NCSA), Software Distribution Policy for Public Domain -# Software -# -# NCSA HDF Version 5 source code and documentation are in the public -# domain, available without fee for education, research, non-commercial and -# commercial purposes. Users may distribute the binary or source code to -# third parties provided that this statement appears on all copies and that -# no charge is made for such copies. -# -# UI MAKES NO REPRESENTATIONS ABOUT THE SUITABILITY OF THE SOFTWARE FOR ANY -# PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY. THE -# UI SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY THE USER OF THIS -# SOFTWARE. The software may have been developed under agreements between -# the UI and the Federal Government which entitle the Government to certain -# rights. -# -# We ask, but do not require that the following message be include in all -# derived works: -# -# Portions developed at the National Center for Supercomputing Applications -# at the University of Illinois at Urbana-Champaign. -# -# By copying this program, you, the user, agree to abide by the conditions -# and understandings with respect to any software which is marked with a -# public domain notice. 
-# -# =======================================================================*/ -# - - -JAVAC = /usr/java1.2/bin/javac -FIND = /bin/find - -CLASSPATH=/usr/java1.2/jre/lib/rt.jar:/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 - - -.SUFFIXES: .java .class - -.java.class: - $(JAVAC) -classpath $(CLASSPATH) $< - -tutorial: ./Compound.class \ - ./Copy.class \ - ./CreateAttribute.class \ - ./CreateDataset.class \ - ./CreateFile.class \ - ./CreateFileInput.class \ - ./CreateGroup.class \ - ./CreateGroupAR.class \ - ./CreateGroupDataset.class \ - ./DatasetRdWt.class \ - ./HyperSlab.class - chmod u+x *.sh - -clean: clean-classes - -distclean: clean-classes clean-data - rm config.cache config.status config.log - rm -rf ./Makefile - -clean-classes: - $(FIND) . \( -name '#*' -o -name '*~' -o -name '*.class' \) -exec rm -f {} \; ;\ - -clean-data: - rm -rf *.h5 - -Compound: ./Compound.class -Copy: ./Copy.class -CreateAttribute: ./CreateAttribute.class -CreateDataset: ./CreateDataset.class -CreateFile: ./CreateFile.class -CreateFileInput: ./CreateFileInput.class -CreateGroup: ./CreateGroup.class -CreateGroupAR: ./CreateGroupAR.class -CreateGroupDataset: ./CreateGroupDataset.class -DatasetRdWt: ./DatasetRdWt.class -HyperSlab: ./HyperSlab.class - -CLASSES= ./Compound.class \ - ./Copy.class \ - ./CreateAttribute.class \ - ./CreateDataset.class \ - ./CreateFileInput.class \ - ./CreateFile.class \ - ./CreateGroup.class \ - ./CreateGroupAR.class \ - ./CreateGroupDataset.class \ - ./DatasetRdWt.class \ - ./HyperSlab.class diff --git a/doc/html/Tutor/examples/java/Makefile.in b/doc/html/Tutor/examples/java/Makefile.in deleted file mode 100644 index e6bd408..0000000 --- a/doc/html/Tutor/examples/java/Makefile.in +++ /dev/null @@ -1,91 +0,0 @@ -# /*======================================================================= -# UNIVERSITY OF ILLINOIS (UI), NATIONAL CENTER FOR SUPERCOMPUTING -# APPLICATIONS (NCSA), Software Distribution Policy for Public Domain -# Software -# -# NCSA HDF Version 5 source code and documentation are in the public -# domain, available without fee for education, research, non-commercial and -# commercial purposes. Users may distribute the binary or source code to -# third parties provided that this statement appears on all copies and that -# no charge is made for such copies. -# -# UI MAKES NO REPRESENTATIONS ABOUT THE SUITABILITY OF THE SOFTWARE FOR ANY -# PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY. THE -# UI SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY THE USER OF THIS -# SOFTWARE. The software may have been developed under agreements between -# the UI and the Federal Government which entitle the Government to certain -# rights. -# -# We ask, but do not require that the following message be include in all -# derived works: -# -# Portions developed at the National Center for Supercomputing Applications -# at the University of Illinois at Urbana-Champaign. -# -# By copying this program, you, the user, agree to abide by the conditions -# and understandings with respect to any software which is marked with a -# public domain notice. 
-# -# =======================================================================*/ -# - - -JAVAC = @JAVAC@ -FIND = @FIND@ - -CLASSPATH=@CLASSPATH@ - - -.SUFFIXES: .java .class - -.java.class: - $(JAVAC) -classpath $(CLASSPATH) $< - -tutorial: ./Compound.class \ - ./Copy.class \ - ./CreateAttribute.class \ - ./CreateDataset.class \ - ./CreateFile.class \ - ./CreateFileInput.class \ - ./CreateGroup.class \ - ./CreateGroupAR.class \ - ./CreateGroupDataset.class \ - ./DatasetRdWt.class \ - ./HyperSlab.class - chmod u+x *.sh - -clean: clean-classes - -distclean: clean-classes clean-data - rm config.cache config.status config.log - rm -rf ./Makefile - -clean-classes: - $(FIND) . \( -name '#*' -o -name '*~' -o -name '*.class' \) -exec rm -f {} \; ;\ - -clean-data: - rm -rf *.h5 - -Compound: ./Compound.class -Copy: ./Copy.class -CreateAttribute: ./CreateAttribute.class -CreateDataset: ./CreateDataset.class -CreateFile: ./CreateFile.class -CreateFileInput: ./CreateFileInput.class -CreateGroup: ./CreateGroup.class -CreateGroupAR: ./CreateGroupAR.class -CreateGroupDataset: ./CreateGroupDataset.class -DatasetRdWt: ./DatasetRdWt.class -HyperSlab: ./HyperSlab.class - -CLASSES= ./Compound.class \ - ./Copy.class \ - ./CreateAttribute.class \ - ./CreateDataset.class \ - ./CreateFileInput.class \ - ./CreateFile.class \ - ./CreateGroup.class \ - ./CreateGroupAR.class \ - ./CreateGroupDataset.class \ - ./DatasetRdWt.class \ - ./HyperSlab.class diff --git a/doc/html/Tutor/examples/java/README b/doc/html/Tutor/examples/java/README deleted file mode 100644 index 95c9360..0000000 --- a/doc/html/Tutor/examples/java/README +++ /dev/null @@ -1,21 +0,0 @@ -These files are Java versions of the example programs used in -the HDF-5 tutoral: - http://hdf.ncsa.uiuc.edu/training/hdf5/ - -The examples here correspond to the examples explained in the first 13 -sections of the tutorial. - -Lesson C program Java program Topic - -4 h5_crtfile.c CreateFile.java Create an HDF-5 file. -5 h5_crtdat.c CreateDataset.java Create a dataset. -6 h5_rdwt.c DatasetRdWt.java Write/Read a dataset. -7 h5_crtatt.c CreateAttribute.java Create an attribute. -8 h5_crtgrp.c CreateGroup.java Create a group. -9 h5_crtgrpar.c CreateGroupAR.java Abs. and Rel. paths. -10 h5_crtgrpd.c CreateGroupDataset.java Create dataset in grp. - -11 h5_compound.c Compound.java Compound datatype -12 h5_hyperslab.c Hyperslab.java Selection of hyperslab -13 h5_copy.c Copy.java Selection of elements - diff --git a/doc/html/Tutor/examples/java/readme.html b/doc/html/Tutor/examples/java/readme.html deleted file mode 100644 index ac96004..0000000 --- a/doc/html/Tutor/examples/java/readme.html +++ /dev/null @@ -1,192 +0,0 @@ - - - - - - readme - - - -

      -HDF 5 Tutorial Examples in Java

      - -


      These files are Java versions of the example programs used in the -HDF-5 tutorial: -
            http://hdf.ncsa.uiuc.edu/training/hdf5/ -

      The examples here correspond to the examples explained in the first -13 sections of the tutorial. -
        -
      Lesson      Topic                                    C file            Java file

      Lesson 4    Create an HDF-5 file.                    h5_crtfile.c      CreateFile.java
      Lesson 5    Create a Dataset in an HDF-5 file.       h5_crtdat.c       CreateDataset.java
      Lesson 6    Write and Read data in a dataset.        h5_rdwt.c         DatasetRdWt.java
      Lesson 7    Create an attribute.                     h5_crtatt.c       CreateAttribute.java
      Lesson 8    Create a group.                          h5_crtgrp.c       CreateGroup.java
      Lesson 9    Using Absolute and Relative paths.       h5_crtgrpar.c     CreateGroupAR.java
      Lesson 10   Create a dataset in a group.             h5_crtgrpd.c      CreateGroupDataset.java
      Lesson 11   Using Compound Datatypes.                h5_compound.c     Compound.java
      Lesson 12   Selection of a hyperslab.                h5_hyperslab.c    HyperSlab.java
      Lesson 13   Selection of elements.                   h5_copy.c         Copy.java
      -

      -


      Some Explanation About Tutorial Examples -

      The Java tutorial programs try to stay close to the corresponding C -programs. The structure of the main function is almost the same as in the C program, with one -call for each HDF5 library function. For example, where the C program has -a call to H5Fopen(), the Java program has a call to H5Fopen_wrap(). -

      The wrapper functions call the HDF-5 library using the Java HDF-5 Interface -(JHI5). The HDF-5 C interface returns error codes; these are represented -by Java Exceptions in the JHI5. The wrapper function catches the exception -and prints a message. -

      For example, the H5Fopen_wrap() method calls the JHI5, and catches -any exceptions which may occur: -

         public static int H5Fopen_wrap (String name, int flags, int access_id)
      -   {
      -      int file_id = -1;    // file identifier 
      -      try 
      -      {
      -         // Open an existing file using default file access properties.
      -         file_id = H5.H5Fopen (name, flags, access_id);
      -      }
      -      catch (HDF5Exception hdf5e)
      -      {
      -         System.out.println 
      -             ("DatasetRdWt.H5Fopen_wrap() with HDF5Exception: "
      -              + hdf5e.getMessage());
      -      }
      -      catch (Exception e)
      -      {
      -         System.out.println 
      -             ("DatasetRdWt.H5Fopen_wrap() with other Exception: " 
      -              + e.getMessage());
      -      }
      -      return file_id;
      -   }
      - -


      -


      NCSA -
      The -National Center for Supercomputing Applications -
      University -of Illinois at Urbana-Champaign -

      hdfhelp@ncsa.uiuc.edu - - diff --git a/doc/html/Tutor/examples/java/runCompound.sh b/doc/html/Tutor/examples/java/runCompound.sh deleted file mode 100644 index ef2be38..0000000 --- a/doc/html/Tutor/examples/java/runCompound.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java Compound $* diff --git a/doc/html/Tutor/examples/java/runCompound.sh.in b/doc/html/Tutor/examples/java/runCompound.sh.in deleted file mode 100644 index bc58088..0000000 --- a/doc/html/Tutor/examples/java/runCompound.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ Compound $* diff --git a/doc/html/Tutor/examples/java/runCopy.sh b/doc/html/Tutor/examples/java/runCopy.sh deleted file mode 100644 index de71783..0000000 --- a/doc/html/Tutor/examples/java/runCopy.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java Copy $* diff --git a/doc/html/Tutor/examples/java/runCopy.sh.in b/doc/html/Tutor/examples/java/runCopy.sh.in deleted file mode 100644 index 2fd8a46..0000000 --- a/doc/html/Tutor/examples/java/runCopy.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ Copy $* diff --git a/doc/html/Tutor/examples/java/runCreateAttribute.sh b/doc/html/Tutor/examples/java/runCreateAttribute.sh deleted file mode 100644 index 419abce..0000000 --- a/doc/html/Tutor/examples/java/runCreateAttribute.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... 
-PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java CreateAttribute $* diff --git a/doc/html/Tutor/examples/java/runCreateAttribute.sh.in b/doc/html/Tutor/examples/java/runCreateAttribute.sh.in deleted file mode 100644 index 83bcdc7..0000000 --- a/doc/html/Tutor/examples/java/runCreateAttribute.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ CreateAttribute $* diff --git a/doc/html/Tutor/examples/java/runCreateDataset.sh b/doc/html/Tutor/examples/java/runCreateDataset.sh deleted file mode 100644 index 371e811..0000000 --- a/doc/html/Tutor/examples/java/runCreateDataset.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java CreateDataset $* diff --git a/doc/html/Tutor/examples/java/runCreateDataset.sh.in b/doc/html/Tutor/examples/java/runCreateDataset.sh.in deleted file mode 100644 index 606e153..0000000 --- a/doc/html/Tutor/examples/java/runCreateDataset.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ CreateDataset $* diff --git a/doc/html/Tutor/examples/java/runCreateFile.sh b/doc/html/Tutor/examples/java/runCreateFile.sh deleted file mode 100644 index e32c0ab..0000000 --- a/doc/html/Tutor/examples/java/runCreateFile.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java CreateFile $* diff --git a/doc/html/Tutor/examples/java/runCreateFile.sh.in b/doc/html/Tutor/examples/java/runCreateFile.sh.in deleted file mode 100644 index bf48b9c..0000000 --- a/doc/html/Tutor/examples/java/runCreateFile.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... 
-PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ CreateFile $* diff --git a/doc/html/Tutor/examples/java/runCreateFileInput.sh b/doc/html/Tutor/examples/java/runCreateFileInput.sh deleted file mode 100644 index fa12f06..0000000 --- a/doc/html/Tutor/examples/java/runCreateFileInput.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java CreateFileInput $* diff --git a/doc/html/Tutor/examples/java/runCreateFileInput.sh.in b/doc/html/Tutor/examples/java/runCreateFileInput.sh.in deleted file mode 100644 index 776eac5..0000000 --- a/doc/html/Tutor/examples/java/runCreateFileInput.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ CreateFileInput $* diff --git a/doc/html/Tutor/examples/java/runCreateGroup.sh b/doc/html/Tutor/examples/java/runCreateGroup.sh deleted file mode 100644 index ee9deee..0000000 --- a/doc/html/Tutor/examples/java/runCreateGroup.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java CreateGroup $* diff --git a/doc/html/Tutor/examples/java/runCreateGroup.sh.in b/doc/html/Tutor/examples/java/runCreateGroup.sh.in deleted file mode 100644 index e2eadb5..0000000 --- a/doc/html/Tutor/examples/java/runCreateGroup.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ CreateGroup $* diff --git a/doc/html/Tutor/examples/java/runCreateGroupAR.sh b/doc/html/Tutor/examples/java/runCreateGroupAR.sh deleted file mode 100644 index 2619a11..0000000 --- a/doc/html/Tutor/examples/java/runCreateGroupAR.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... 
-PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java CreateGroupAR $* diff --git a/doc/html/Tutor/examples/java/runCreateGroupAR.sh.in b/doc/html/Tutor/examples/java/runCreateGroupAR.sh.in deleted file mode 100644 index d61d852..0000000 --- a/doc/html/Tutor/examples/java/runCreateGroupAR.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ CreateGroupAR $* diff --git a/doc/html/Tutor/examples/java/runCreateGroupDataset.sh b/doc/html/Tutor/examples/java/runCreateGroupDataset.sh deleted file mode 100644 index 15b7bfa..0000000 --- a/doc/html/Tutor/examples/java/runCreateGroupDataset.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java CreateGroupDataset $* diff --git a/doc/html/Tutor/examples/java/runCreateGroupDataset.sh.in b/doc/html/Tutor/examples/java/runCreateGroupDataset.sh.in deleted file mode 100644 index af2b4b5..0000000 --- a/doc/html/Tutor/examples/java/runCreateGroupDataset.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ CreateGroupDataset $* diff --git a/doc/html/Tutor/examples/java/runDatasetRdWt.sh b/doc/html/Tutor/examples/java/runDatasetRdWt.sh deleted file mode 100644 index a049ea8..0000000 --- a/doc/html/Tutor/examples/java/runDatasetRdWt.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java DatasetRdWt $* diff --git a/doc/html/Tutor/examples/java/runDatasetRdWt.sh.in b/doc/html/Tutor/examples/java/runDatasetRdWt.sh.in deleted file mode 100644 index ad3a049..0000000 --- a/doc/html/Tutor/examples/java/runDatasetRdWt.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... 
-PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ DatasetRdWt $* diff --git a/doc/html/Tutor/examples/java/runHyperSlab.sh b/doc/html/Tutor/examples/java/runHyperSlab.sh deleted file mode 100644 index 549f807..0000000 --- a/doc/html/Tutor/examples/java/runHyperSlab.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=/afs/ncsa/projects/hdf/java/java2/mcgrath/arabica/New5 -HDF5LIB=/afs/ncsa/projects/hdf/release/prehdf5-1.2.1/SunOS_5.7/lib - -#make this relative to the source root... -PWD=/afs/ncsa.uiuc.edu/projects/hdf/java/java2/mcgrath/arabica/java-hdf5 -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/solaris" - -export CLASSPATH -export LD_LIBRARY_PATH - -/usr/java1.2/bin/java HyperSlab $* diff --git a/doc/html/Tutor/examples/java/runHyperSlab.sh.in b/doc/html/Tutor/examples/java/runHyperSlab.sh.in deleted file mode 100644 index f515fc9..0000000 --- a/doc/html/Tutor/examples/java/runHyperSlab.sh.in +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -JH5INSTALLDIR=@JH5INST@ -HDF5LIB=@HDF5LIB@ - -#make this relative to the source root... -PWD=@PWD@ -LIBDIR=$JH5INSTALLDIR"/lib" - -CLASSPATH=".:"$LIBDIR"/jhdf5.jar" - -LD_LIBRARY_PATH=$HDF5LIB":"$LIBDIR"/@JAVATARG@" - -export CLASSPATH -export LD_LIBRARY_PATH - -@JAVA@ HyperSlab $* diff --git a/doc/html/Tutor/examples/mountexample.f90 b/doc/html/Tutor/examples/mountexample.f90 deleted file mode 100644 index f4341b2..0000000 --- a/doc/html/Tutor/examples/mountexample.f90 +++ /dev/null @@ -1,187 +0,0 @@ -! -!In the following example we create one file with a group in it, -!and another file with a dataset. Mounting is used to -!access the dataset from the second file as a member of a group -!in the first file. -! - - PROGRAM MOUNTEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - ! - ! Filenames are "mount1.h5" and "mount2.h5" - ! - CHARACTER(LEN=9), PARAMETER :: filename1 = "mount1.h5" - CHARACTER(LEN=9), PARAMETER :: filename2 = "mount2.h5" - - ! - !data space rank and dimensions - ! - INTEGER, PARAMETER :: RANK = 2 - INTEGER, PARAMETER :: NX = 4 - INTEGER, PARAMETER :: NY = 5 - - ! - ! File identifiers - ! - INTEGER(HID_T) :: file1_id, file2_id - - ! - ! Group identifier - ! - INTEGER(HID_T) :: gid - - ! - ! Dataset identifier - ! - INTEGER(HID_T) :: dset_id - - ! - ! Data space identifier - ! - INTEGER(HID_T) :: dataspace - - ! - ! Data type identifier - ! - INTEGER(HID_T) :: dtype_id - - ! - ! The dimensions for the dataset. - ! - INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/NX,NY/) - - ! - ! Flag to check operation success - ! - INTEGER :: error - - ! - ! General purpose integer - ! - INTEGER :: i, j - - ! - ! Data buffers - ! - INTEGER, DIMENSION(NX,NY) :: data_in, data_out - - ! - ! Initialize FORTRAN interface. - ! - CALL h5open_f(error) - - ! - ! Initialize data_in buffer - ! - do i = 1, NX - do j = 1, NY - data_in(i,j) = (i-1) + (j-1) - end do - end do - - ! - ! Create first file "mount1.h5" using default properties. - ! - CALL h5fcreate_f(filename1, H5F_ACC_TRUNC_F, file1_id, error) - - ! - ! Create group "/G" inside file "mount1.h5". - ! - CALL h5gcreate_f(file1_id, "/G", gid, error) - - ! - ! Close file and group identifiers. - ! - CALL h5gclose_f(gid, error) - CALL h5fclose_f(file1_id, error) - - ! - ! Create second file "mount2.h5" using default properties. - ! 
- CALL h5fcreate_f(filename2, H5F_ACC_TRUNC_F, file2_id, error) - - ! - ! Create data space for the dataset. - ! - CALL h5screate_simple_f(RANK, dims, dataspace, error) - - ! - ! Create dataset "/D" inside file "mount2.h5". - ! - CALL h5dcreate_f(file2_id, "/D", H5T_NATIVE_INTEGER, dataspace, & - dset_id, error) - - ! - ! Write data_in to the dataset - ! - CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data_in, error) - - ! - ! Close file, dataset and dataspace identifiers. - ! - CALL h5sclose_f(dataspace, error) - CALL h5dclose_f(dset_id, error) - CALL h5fclose_f(file2_id, error) - - ! - ! Reopen both files. - ! - CALL h5fopen_f (filename1, H5F_ACC_RDWR_F, file1_id, error) - CALL h5fopen_f (filename2, H5F_ACC_RDWR_F, file2_id, error) - - ! - ! Mount the second file under the first file's "/G" group. - ! - CALL h5fmount_f (file1_id, "/G", file2_id, error) - - - ! - ! Access dataset D in the first file under /G/D name. - ! - CALL h5dopen_f(file1_id, "/G/D", dset_id, error) - - ! - ! Get dataset's data type. - ! - CALL h5dget_type_f(dset_id, dtype_id, error) - - ! - ! Read the dataset. - ! - CALL h5dread_f(dset_id, dtype_id, data_out, error) - - ! - ! Print out the data. - ! - do i = 1, NX - print *, (data_out(i,j), j = 1, NY) - end do - - - ! - !Close dset_id and dtype_id. - ! - CALL h5dclose_f(dset_id, error) - CALL h5tclose_f(dtype_id, error) - - ! - ! Unmount the second file. - ! - CALL h5funmount_f(file1_id, "/G", error); - - ! - ! Close both files. - ! - CALL h5fclose_f(file1_id, error) - CALL h5fclose_f(file2_id, error) - ! - ! Close FORTRAN interface. - ! - CALL h5close_f(error) - - END PROGRAM MOUNTEXAMPLE - diff --git a/doc/html/Tutor/examples/refobjexample.f90 b/doc/html/Tutor/examples/refobjexample.f90 deleted file mode 100644 index fdbb26d..0000000 --- a/doc/html/Tutor/examples/refobjexample.f90 +++ /dev/null @@ -1,142 +0,0 @@ -! -! This program shows how to create and store references to the objects. -! Program creates a file, two groups, a dataset to store integer data and -! a dataset to store references to the objects. -! Stored references are used to open the objects they are point to. -! Data is written to the dereferenced dataset, and class type is displayed for -! the shared datatype. -! - PROGRAM OBJ_REFERENCES - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - CHARACTER(LEN=10), PARAMETER :: filename = "FORTRAN.h5" ! File - CHARACTER(LEN=8), PARAMETER :: dsetnamei = "INTEGERS" ! Dataset with the integer data - CHARACTER(LEN=17), PARAMETER :: dsetnamer = "OBJECT_REFERENCES" ! Dataset wtih object - ! references - CHARACTER(LEN=6), PARAMETER :: groupname1 = "GROUP1" ! Groups in the file - CHARACTER(LEN=6), PARAMETER :: groupname2 = "GROUP2" ! - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: grp1_id ! Group identifiers - INTEGER(HID_T) :: grp2_id ! - INTEGER(HID_T) :: dset_id ! Dataset identifiers - INTEGER(HID_T) :: dsetr_id ! - INTEGER(HID_T) :: type_id ! Type identifier - INTEGER(HID_T) :: space_id ! Dataspace identifiers - INTEGER(HID_T) :: spacer_id ! - INTEGER :: error - INTEGER(HSIZE_T), DIMENSION(1) :: dims = (/5/) - INTEGER(HSIZE_T), DIMENSION(1) :: dimsr= (/4/) - INTEGER(HSIZE_T), DIMENSION(1) :: my_maxdims = (/5/) - INTEGER :: rank = 1 - INTEGER :: rankr = 1 - TYPE(hobj_ref_t_f), DIMENSION(4) :: ref - TYPE(hobj_ref_t_f), DIMENSION(4) :: ref_out - INTEGER, DIMENSION(5) :: data = (/1, 2, 3, 4, 5/) - INTEGER :: class, ref_size - ! - ! Initialize FORTRAN interface. - ! - CALL h5open_f(error) - ! - ! Create a file - ! 
- CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - ! Default file access and file creation - ! properties are used. - ! - ! Create a group in the file - ! - CALL h5gcreate_f(file_id, groupname1, grp1_id, error) - ! - ! Create a group inside the created gorup - ! - CALL h5gcreate_f(grp1_id, groupname2, grp2_id, error) - ! - ! Create dataspaces for datasets - ! - CALL h5screate_simple_f(rank, dims, space_id, error, maxdims=my_maxdims) - CALL h5screate_simple_f(rankr, dimsr, spacer_id, error) - ! - ! Create integer dataset - ! - CALL h5dcreate_f(file_id, dsetnamei, H5T_NATIVE_INTEGER, space_id, & - dset_id, error) - ! - ! Create dataset to store references to the objects - ! - CALL h5dcreate_f(file_id, dsetnamer, H5T_STD_REF_OBJ, spacer_id, & - dsetr_id, error) - ! - ! Create a datatype and store in the file - ! - CALL h5tcopy_f(H5T_NATIVE_REAL, type_id, error) - CALL h5tcommit_f(file_id, "MyType", type_id, error) - ! - ! Close dataspaces, groups and integer dataset - ! - CALL h5sclose_f(space_id, error) - CALL h5sclose_f(spacer_id, error) - CALL h5tclose_f(type_id, error) - CALL h5dclose_f(dset_id, error) - CALL h5gclose_f(grp1_id, error) - CALL h5gclose_f(grp2_id, error) - ! - ! Create references to two groups, integer dataset and shared datatype - ! and write it to the dataset in the file - ! - CALL h5rcreate_f(file_id, groupname1, ref(1), error) - CALL h5rcreate_f(file_id, "/GROUP1/GROUP2", ref(2), error) - CALL h5rcreate_f(file_id, dsetnamei, ref(3), error) - CALL h5rcreate_f(file_id, "MyType", ref(4), error) - ref_size = size(ref) - CALL h5dwrite_f(dsetr_id, H5T_STD_REF_OBJ, ref, ref_size, error) - ! - ! Close the dataset - ! - CALL h5dclose_f(dsetr_id, error) - ! - ! Reopen the dataset with object references and read references to the buffer - ! - CALL h5dopen_f(file_id, dsetnamer,dsetr_id,error) - ref_size = size(ref_out) - CALL h5dread_f(dsetr_id, H5T_STD_REF_OBJ, ref_out, ref_size, error) - ! - ! Dereference the third reference. We know that it is a dataset. On practice - ! one should use h5rget_object_type_f function to find out - ! the type of an object the reference points to. - ! - CALL h5rdereference_f(dsetr_id, ref(3), dset_id, error) - ! - ! Write data to the dataset. - ! - CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, error) - if (error .eq. 0) write(*,*) "Data has been successfully written to the dataset " - ! - ! Dereference the fourth reference. We know that it is a datatype. On practice - ! one should use h5rget_object_type_f function to find out - ! the type of an object the reference points to. - ! - CALL h5rdereference_f(dsetr_id, ref(4), type_id, error) - ! - ! Get datatype class and display it if it is of a FLOAT class. - ! - CALL h5tget_class_f(type_id, class, error) - if(class .eq. H5T_FLOAT_F) write(*,*) "Stored datatype is of a FLOAT class" - ! - ! Close all objects. - ! - CALL h5dclose_f(dset_id, error) - CALL h5tclose_f(type_id, error) - CALL h5dclose_f(dsetr_id, error) - CALL h5fclose_f(file_id, error) - ! - ! Close FORTRAN interface. - ! - CALL h5close_f(error) - - END PROGRAM OBJ_REFERENCES - - diff --git a/doc/html/Tutor/examples/refregexample.f90 b/doc/html/Tutor/examples/refregexample.f90 deleted file mode 100644 index 5d72f1e..0000000 --- a/doc/html/Tutor/examples/refregexample.f90 +++ /dev/null @@ -1,162 +0,0 @@ -! -! This program shows how to create, store and dereference references -! to the dataset regions. -! Program creates a file and writes two dimensional integer dataset -! to it. 
Then program creates and stores references to the hyperslab -! and 3 points selected in the integer dataset, in the second dataset. -! Program reopens the second dataset, reads and dereferences region -! references, and then reads and displays selected data from the -! integer dataset. -! - PROGRAM REG_REFERENCE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - CHARACTER(LEN=10), PARAMETER :: filename = "FORTRAN.h5" - CHARACTER(LEN=6), PARAMETER :: dsetnamev = "MATRIX" - CHARACTER(LEN=17), PARAMETER :: dsetnamer = "REGION_REFERENCES" - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: space_id ! Dataspace identifier - INTEGER(HID_T) :: spacer_id ! Dataspace identifier - INTEGER(HID_T) :: dsetv_id ! Dataset identifier - INTEGER(HID_T) :: dsetr_id ! Dataset identifier - INTEGER :: error - TYPE(hdset_reg_ref_t_f) , DIMENSION(2) :: ref ! Buffers to store references - TYPE(hdset_reg_ref_t_f) , DIMENSION(2) :: ref_out ! - INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/2,9/) ! Datasets dimensions - INTEGER(HSIZE_T), DIMENSION(1) :: dimsr = (/2/) ! - INTEGER(HSIZE_T), DIMENSION(2) :: start - INTEGER(HSIZE_T), DIMENSION(2) :: count - INTEGER :: rankr = 1 - INTEGER :: rank = 2 - INTEGER , DIMENSION(2,9) :: data - INTEGER , DIMENSION(2,9) :: data_out = 0 - INTEGER(HSIZE_T) , DIMENSION(2,3) :: coord - INTEGER(SIZE_T) ::num_points = 3 ! Number of selected points - INTEGER :: i, j - INTEGER :: ref_size - coord = reshape((/1,1,2,7,1,9/), (/2,3/)) ! Coordinates of selected points - data = reshape ((/1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6/), (/2,9/)) - ! - ! Initialize FORTRAN interface. - ! - CALL h5open_f(error) - ! - ! Create a new file. - ! - CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error) - ! Default file access and file creation - ! properties are used. - ! - ! Create dataspaces: - ! - ! for dataset with references to dataset regions - ! - CALL h5screate_simple_f(rankr, dimsr, spacer_id, error) - ! - ! for integer dataset - ! - CALL h5screate_simple_f(rank, dims, space_id, error) - ! - ! Create and write datasets: - ! - ! Integer dataset - ! - CALL h5dcreate_f(file_id, dsetnamev, H5T_NATIVE_INTEGER, space_id, & - dsetv_id, error) - CALL h5dwrite_f(dsetv_id, H5T_NATIVE_INTEGER, data, error) - CALL h5dclose_f(dsetv_id, error) - ! - ! Dataset with references - ! - CALL h5dcreate_f(file_id, dsetnamer, H5T_STD_REF_DSETREG, spacer_id, & - dsetr_id, error) - ! - ! Create a reference to the hyperslab selection. - ! - start(1) = 0 - start(2) = 3 - count(1) = 2 - count(2) = 3 - CALL h5sselect_hyperslab_f(space_id, H5S_SELECT_SET_F, & - start, count, error) - CALL h5rcreate_f(file_id, dsetnamev, space_id, ref(1), error) - ! - ! Create a reference to elements selection. - ! - CALL h5sselect_none_f(space_id, error) - CALL h5sselect_elements_f(space_id, H5S_SELECT_SET_F, rank, num_points,& - coord, error) - CALL h5rcreate_f(file_id, dsetnamev, space_id, ref(2), error) - ! - ! Write dataset with the references. - ! - ref_size = size(ref) - CALL h5dwrite_f(dsetr_id, H5T_STD_REF_DSETREG, ref, ref_size, error) - ! - ! Close all objects. - ! - CALL h5sclose_f(space_id, error) - CALL h5sclose_f(spacer_id, error) - CALL h5dclose_f(dsetr_id, error) - CALL h5fclose_f(file_id, error) - ! - ! Reopen the file to test selections. - ! - CALL h5fopen_f (filename, H5F_ACC_RDWR_F, file_id, error) - CALL h5dopen_f(file_id, dsetnamer, dsetr_id, error) - ! - ! Read references to the dataset regions. - ! 
- ref_size = size(ref_out) - CALL h5dread_f(dsetr_id, H5T_STD_REF_DSETREG, ref_out, ref_size, error) - ! - ! Dereference the first reference. - ! - CALL H5rdereference_f(dsetr_id, ref_out(1), dsetv_id, error) - CALL H5rget_region_f(dsetr_id, ref_out(1), space_id, error) - ! - ! Read selected data from the dataset. - ! - CALL h5dread_f(dsetv_id, H5T_NATIVE_INTEGER, data_out, error, & - mem_space_id = space_id, file_space_id = space_id) - write(*,*) "Hypeslab selection" - write(*,*) - do i = 1,2 - write(*,*) (data_out (i,j), j = 1,9) - enddo - write(*,*) - CALL h5sclose_f(space_id, error) - CALL h5dclose_f(dsetv_id, error) - data_out = 0 - ! - ! Dereference the second reference. - ! - CALL H5rdereference_f(dsetr_id, ref_out(2), dsetv_id, error) - CALL H5rget_region_f(dsetr_id, ref_out(2), space_id, error) - ! - ! Read selected data from the dataset. - ! - CALL h5dread_f(dsetv_id, H5T_NATIVE_INTEGER, data_out, error, & - mem_space_id = space_id, file_space_id = space_id) - write(*,*) "Point selection" - write(*,*) - do i = 1,2 - write(*,*) (data_out (i,j), j = 1,9) - enddo - ! - ! Close all objects - ! - CALL h5sclose_f(space_id, error) - CALL h5dclose_f(dsetv_id, error) - CALL h5dclose_f(dsetr_id, error) - ! - ! Close FORTRAN interface. - ! - CALL h5close_f(error) - - END PROGRAM REG_REFERENCE - - diff --git a/doc/html/Tutor/examples/rwdsetexample.f90 b/doc/html/Tutor/examples/rwdsetexample.f90 deleted file mode 100644 index 729e84d..0000000 --- a/doc/html/Tutor/examples/rwdsetexample.f90 +++ /dev/null @@ -1,78 +0,0 @@ -! -! The following example shows how to write and read to/from an existing dataset. -! It opens the file created in the previous example, obtains the dataset -! identifier, writes the data to the dataset in the file, -! then reads the dataset to memory. -! - - - PROGRAM RWDSETEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=8), PARAMETER :: filename = "dsetf.h5" ! File name - CHARACTER(LEN=4), PARAMETER :: dsetname = "dset" ! Dataset name - - INTEGER(HID_T) :: file_id ! File identifier - INTEGER(HID_T) :: dset_id ! Dataset identifier - - INTEGER :: error ! Error flag - INTEGER :: i, j - - INTEGER, DIMENSION(4,6) :: dset_data, data_out ! Data buffers - - ! - ! Initialize the dset_data array. - ! - do i = 1, 4 - do j = 1, 6 - dset_data(i,j) = (i-1)*6 + j; - end do - end do - - ! - ! Initialize FORTRAN predefined datatypes - ! - CALL h5open_f(error) - - ! - ! Open an existing file. - ! - CALL h5fopen_f (filename, H5F_ACC_RDWR_F, file_id, error) - - ! - ! Open an existing dataset. - ! - CALL h5dopen_f(file_id, dsetname, dset_id, error) - - ! - ! Write the dataset. - ! - CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, dset_data, error) - - ! - ! Read the dataset. - ! - CALL h5dread_f(dset_id, H5T_NATIVE_INTEGER, data_out, error) - - ! - ! Close the dataset. - ! - CALL h5dclose_f(dset_id, error) - - ! - ! Close the file. - ! - CALL h5fclose_f(file_id, error) - - ! - ! Close FORTRAN predefined datatypes. - ! - CALL h5close_f(error) - - END PROGRAM RWDSETEXAMPLE - - - diff --git a/doc/html/Tutor/examples/selectele.f90 b/doc/html/Tutor/examples/selectele.f90 deleted file mode 100644 index 8727bd9..0000000 --- a/doc/html/Tutor/examples/selectele.f90 +++ /dev/null @@ -1,282 +0,0 @@ -! -! This program creates two files, copy1.h5, and copy2.h5. -! In copy1.h5, it creates a 3x4 dataset called 'Copy1', -! and write 0's to this dataset. -! In copy2.h5, it create a 3x4 dataset called 'Copy2', -! and write 1's to this dataset. -! 
It closes both files, reopens both files, selects two -! points in copy1.h5 and writes values to them. Then it -! uses an H5Scopy to write the same selection to copy2.h5. -! Program reopens the files, and reads and prints the contents of -! the two datasets. -! - - PROGRAM SELECTEXAMPLE - - USE HDF5 ! This module contains all necessary modules - - IMPLICIT NONE - - CHARACTER(LEN=8), PARAMETER :: filename1 = "copy1.h5" ! File name - CHARACTER(LEN=8), PARAMETER :: filename2 = "copy2.h5" ! - CHARACTER(LEN=5), PARAMETER :: dsetname1 = "Copy1" ! Dataset name - CHARACTER(LEN=5), PARAMETER :: dsetname2 = "Copy2" ! - - INTEGER, PARAMETER :: RANK = 2 ! Dataset rank - - INTEGER(SIZE_T), PARAMETER :: NUMP = 2 ! Number of points selected - - INTEGER(HID_T) :: file1_id ! File1 identifier - INTEGER(HID_T) :: file2_id ! File2 identifier - INTEGER(HID_T) :: dset1_id ! Dataset1 identifier - INTEGER(HID_T) :: dset2_id ! Dataset2 identifier - INTEGER(HID_T) :: dataspace1 ! Dataspace identifier - INTEGER(HID_T) :: dataspace2 ! Dataspace identifier - INTEGER(HID_T) :: memspace ! memspace identifier - - INTEGER(HSIZE_T), DIMENSION(1) :: dimsm = (/2/) - ! Memory dataspace dimensions - INTEGER(HSIZE_T), DIMENSION(2) :: dimsf = (/3,4/) - ! File dataspace dimensions - INTEGER(HSIZE_T), DIMENSION(RANK,NUMP) :: coord ! Elements coordinates - ! in the file - - INTEGER, DIMENSION(3,4) :: buf1, buf2, bufnew ! Data buffers - INTEGER, DIMENSION(2) :: val = (/53, 59/) ! Values to write - - INTEGER :: memrank = 1 ! Rank of the dataset in memory - - INTEGER :: i, j - - INTEGER :: error ! Error flag - LOGICAL :: status - - - ! - ! Create two files containing identical datasets. Write 0's to one - ! and 1's to the other. - ! - - ! - ! Data initialization. - ! - do i = 1, 3 - do j = 1, 4 - buf1(i,j) = 0; - end do - end do - - do i = 1, 3 - do j = 1, 4 - buf2(i,j) = 1; - end do - end do - - ! - ! Initialize FORTRAN predefined datatypes - ! - CALL h5open_f(error) - - ! - ! Create file1, file2 using default properties. - ! - CALL h5fcreate_f(filename1, H5F_ACC_TRUNC_F, file1_id, error) - - CALL h5fcreate_f(filename2, H5F_ACC_TRUNC_F, file2_id, error) - - ! - ! Create the data space for the datasets. - ! - CALL h5screate_simple_f(RANK, dimsf, dataspace1, error) - - CALL h5screate_simple_f(RANK, dimsf, dataspace2, error) - - ! - ! Create the datasets with default properties. - ! - CALL h5dcreate_f(file1_id, dsetname1, H5T_NATIVE_INTEGER, dataspace1, & - dset1_id, error) - - CALL h5dcreate_f(file2_id, dsetname2, H5T_NATIVE_INTEGER, dataspace2, & - dset2_id, error) - - ! - ! Write the datasets. - ! - CALL h5dwrite_f(dset1_id, H5T_NATIVE_INTEGER, buf1, error) - - CALL h5dwrite_f(dset2_id, H5T_NATIVE_INTEGER, buf2, error) - - ! - ! Close the dataspace for the datasets. - ! - CALL h5sclose_f(dataspace1, error) - - CALL h5sclose_f(dataspace2, error) - - ! - ! Close the datasets. - ! - CALL h5dclose_f(dset1_id, error) - - CALL h5dclose_f(dset2_id, error) - - ! - ! Close the files. - ! - CALL h5fclose_f(file1_id, error) - - CALL h5fclose_f(file2_id, error) - - ! - ! Open the two files. Select two points in one file, write values to - ! those point locations, then do H5Scopy and write the values to the - ! other file. Close files. - ! - - ! - ! Open the files. - ! - CALL h5fopen_f (filename1, H5F_ACC_RDWR_F, file1_id, error) - - CALL h5fopen_f (filename2, H5F_ACC_RDWR_F, file2_id, error) - - ! - ! Open the datasets. - ! - CALL h5dopen_f(file1_id, dsetname1, dset1_id, error) - - CALL h5dopen_f(file2_id, dsetname2, dset2_id, error) - - ! - ! 
Get dataset1's dataspace identifier. - ! - CALL h5dget_space_f(dset1_id, dataspace1, error) - - ! - ! Create memory dataspace. - ! - CALL h5screate_simple_f(memrank, dimsm, memspace, error) - - ! - ! Set the selected point positions. Because Fortran array index starts - ! from 1, so add one to the actual select points in C. - ! - coord(1,1) = 1 - coord(2,1) = 2 - coord(1,2) = 1 - coord(2,2) = 4 - - ! - ! Select the elements in file space. - ! - CALL h5sselect_elements_f(dataspace1, H5S_SELECT_SET_F, RANK, NUMP,& - coord, error) - - ! - ! Write value into the selected points in dataset1. - ! - CALL H5dwrite_f(dset1_id, H5T_NATIVE_INTEGER, val, error, & - mem_space_id=memspace, file_space_id=dataspace1) - - ! - ! Copy the daspace1 into dataspace2. - ! - CALL h5scopy_f(dataspace1, dataspace2, error) - - ! - ! Write value into the selected points in dataset2. - ! - CALL H5dwrite_f(dset2_id, H5T_NATIVE_INTEGER, val, error, & - mem_space_id=memspace, file_space_id=dataspace2) - - ! - ! Close the dataspace for the datasets. - ! - CALL h5sclose_f(dataspace1, error) - - CALL h5sclose_f(dataspace2, error) - - ! - ! Close the memoryspace. - ! - CALL h5sclose_f(memspace, error) - - ! - ! Close the datasets. - ! - CALL h5dclose_f(dset1_id, error) - - CALL h5dclose_f(dset2_id, error) - - ! - ! Close the files. - ! - CALL h5fclose_f(file1_id, error) - - CALL h5fclose_f(file2_id, error) - - ! - ! Open both files and print the contents of the datasets. - ! - - ! - ! Open the files. - ! - CALL h5fopen_f (filename1, H5F_ACC_RDWR_F, file1_id, error) - - CALL h5fopen_f (filename2, H5F_ACC_RDWR_F, file2_id, error) - - ! - ! Open the datasets. - ! - CALL h5dopen_f(file1_id, dsetname1, dset1_id, error) - - CALL h5dopen_f(file2_id, dsetname2, dset2_id, error) - - ! - ! Read dataset from the first file. - ! - CALL h5dread_f(dset1_id, H5T_NATIVE_INTEGER, bufnew, error) - - ! - ! Display the data read from dataset "Copy1" - ! - write(*,*) "The data in dataset Copy1 is: " - do i = 1, 3 - print *, (bufnew(i,j), j = 1,4) - end do - - ! - ! Read dataset from the second file. - ! - CALL h5dread_f(dset2_id, H5T_NATIVE_INTEGER, bufnew, error) - - ! - ! Display the data read from dataset "Copy2" - ! - write(*,*) "The data in dataset Copy2 is: " - do i = 1, 3 - print *, (bufnew(i,j), j = 1,4) - end do - - ! - ! Close datasets. - ! - CALL h5dclose_f(dset1_id, error) - - CALL h5dclose_f(dset2_id, error) - - ! - ! Close files. - ! - CALL h5fclose_f(file1_id, error) - - CALL h5fclose_f(file2_id, error) - - ! - ! Close FORTRAN predefined datatypes. - ! - CALL h5close_f(error) - - END PROGRAM SELECTEXAMPLE diff --git a/doc/html/Tutor/extend.html b/doc/html/Tutor/extend.html deleted file mode 100644 index 326a946..0000000 --- a/doc/html/Tutor/extend.html +++ /dev/null @@ -1,284 +0,0 @@ - -HDF5 Tutorial - Chunking and Extendible Datasets - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Chunking and Extendible Datasets -

      - -
      - - -

      Contents:

      - -
      - -

      Creating an Extendible Dataset

      -An extendible dataset is one whose dimensions can grow. -HDF5 allows you to define a dataset to have certain initial dimensions, -then to later increase the size of any of the initial dimensions. -

      -HDF5 requires you to use chunking to define extendible datasets. -This makes it possible to extend datasets efficiently without -having to excessively reorganize storage. -

      -The following operations are required in order to write an extendible dataset: -

        -
      1. Declare the dataspace of the dataset to have unlimited dimensions for all dimensions that might eventually be extended. -
      2. Set dataset creation properties to enable chunking. -
      3. Create the dataset. -
      4. Extend the size of the dataset. -
      -

      Programming Example

      -
      -

      Description

      -This example shows how to create a 3 x 3 extendible dataset, write to that -dataset, extend the dataset to 10x3, and write to the dataset again. -
      -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. - - - -
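      Before turning to the remarks, a minimal C sketch of the four steps above may help; the file name (ext.h5) and dataset name (ExtendibleArray) are illustrative only and error checking is omitted, so this is an outline rather than the downloadable tutorial program:

          #include "hdf5.h"

          int main(void)
          {
              hsize_t dims[2]    = {3, 3};              /* initial size               */
              hsize_t maxdims[2] = {H5S_UNLIMITED, 3};  /* first dimension may grow   */
              hsize_t chunk[2]   = {3, 3};              /* chunk size                 */
              hsize_t newdims[2] = {10, 3};             /* size after extending       */

              /* 1. Dataspace with one unlimited dimension. */
              hid_t space = H5Screate_simple(2, dims, maxdims);

              /* 2. Dataset creation property list with chunking enabled. */
              hid_t plist = H5Pcreate(H5P_DATASET_CREATE);
              H5Pset_chunk(plist, 2, chunk);

              /* 3. Create the file and the chunked, extendible dataset. */
              hid_t file = H5Fcreate("ext.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
              hid_t dset = H5Dcreate(file, "ExtendibleArray", H5T_NATIVE_INT, space, plist);

              /* ... write the initial 3 x 3 data with H5Dwrite ... */

              /* 4. Extend the dataset to 10 x 3; the new rows are then written
                    by selecting them with a hyperslab. */
              H5Dextend(dset, newdims);

              H5Dclose(dset);
              H5Sclose(space);
              H5Pclose(plist);
              H5Fclose(file);
              return 0;
          }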

      Remarks

      -

      -

        -
      • The routine H5Pcreate / h5pcreate_f -creates a new property list as an instance of -a property list class. The signature is as follows: -

        -C: -

        -    hid_t H5Pcreate (H5P_class_t classtype)
        -
        -

        -FORTRAN: -

        -    h5pcreate_f (classtype, prp_id, hdferr) 
        -
        -            classtype  IN: INTEGER 
        -            prp_id    OUT: INTEGER(HID_T)
        -            hdferr    OUT: INTEGER 
        -
        -

        -

          -
        • The parameter classtype is the type of property list to create. - Valid class types are as follows: -
          C                            FORTRAN

          H5P_FILE_CREATE              H5P_FILE_CREATE_F
          H5P_FILE_ACCESS              H5P_FILE_ACCESS_F
          H5P_DATASET_CREATE           H5P_DATASET_CREATE_F
          H5P_DATASET_XFER             H5P_DATASET_XFER_F
          H5P_MOUNT                    H5P_MOUNT_F
          -
          -
          -
        • In C, the property list identifier is returned if successful; -otherwise a negative value is returned. -In FORTRAN, the property list identifier is returned in prp_id -and the return value for the call is returned in hdferr. -
        -

        -

      • The routine H5Pset_chunk / h5pset_chunk_f -sets the size of the chunks used - to store a chunked layout dataset. - The signature of this routine is as follows: -

        -C: -

        -    herr_t H5Pset_chunk (hid_t prp_id, int ndims, 
        -                         const hsize_t * dims) 
        -
        -

        -FORTRAN: -

        -    h5pset_chunk_f (prp_id, ndims, dims, hdferr) 
        -
        -            prp_id    IN: INTEGER(HID_T)
        -            ndims     IN: INTEGER
        -            dims      IN: INTEGER(HSIZE_T), DIMENSION(ndims) 
        -            hdferr   OUT: INTEGER
        -
        -
        -

        -

          -
        • The prp_id parameter is the identifier for the property - list to query. -
        • The ndims parameter is the number of dimensions of - each chunk. -
        • The dims parameter is an array containing the size of - each chunk. -
        • In C, a non-negative value is returned if successful; otherwise a - negative value is returned. - In FORTRAN, the return value is returned in hdferr: 0 if - successful and -1 otherwise. -
        -

        -

      • The H5Dextend / h5dextend_f routine -extends a dataset that has an unlimited - dimension. The signature is as follows: -

        -C: -

        -    herr_t H5Dextend (hid_t dset_id, const hsize_t * size) 
        -
        -

        -FORTRAN: -

        -    h5dextend_f (dset_id, size, hdferr) 
        -
        -            dset_id   IN: INTEGER(HID_T) 
        -            size         IN: INTEGER(HSIZE_T), DIMENSION(*)  
        -            hdferr      OUT: INTEGER
        -
        -

        -

          -
        • The dset_id parameter is the dataset identifier. -
        • The size parameter is an array containing the - new magnitude of each dimension. 
        • In C, this function returns a non-negative value if successful and - a negative value otherwise. - In FORTRAN, the return value is returned in hdferr: - 0 if successful and -1 otherwise. -
        -

        -

      • The H5Dget_create_plist / h5dget_create_plist_f -routine returns an identifier for a -copy of the dataset creation property list for a dataset. -

        -

      • The C function, H5Pget_layout, returns the layout of the raw data for a -dataset. Valid types are H5D_CONTIGUOUS and -H5D_CHUNKED. -A FORTRAN routine for H5Pget_layout does not yet exist. -

        -

      • The H5Pget_chunk / h5pget_chunk_f -routine retrieves the size of chunks -for the raw data of a chunked layout dataset. -The signature is as follows: -

        -C: -

        -    int H5Pget_chunk (hid_t prp_id, int ndims, hsize_t * dims) 
        -
        -

        -FORTRAN: -

        -    h5pget_chunk_f (prp_id, ndims, dims, hdferr)
        -
        -            prp_id    IN: INTEGER(HID_T) 
        -            ndims     IN: INTEGER
        -            dims     OUT: INTEGER(HSIZE_T), DIMENSION(ndims) 
        -            hdferr   OUT: INTEGER 
        -
        -

        -

          - -
        • The prp_id parameter is the identifier of the - property list to query. -
        • The ndims parameter is the size of the dims - array. -
        • The dims parameter is the array in which to store the chunk - dimensions. -
        • In C, this function returns the chunk dimensionality if successful - and a negative value otherwise. - In FORTRAN, the return value is returned in hdferr: - the chunked rank if successful and -1 otherwise. -
        -

        -

      • The H5Pclose / h5pclose_f routine - terminates access to a property list. - The signature is as follows: -

        -C: -

        -    herr_t H5Pclose (hid_t prp_id) 
        -
        -

        -FORTRAN: -

        -    h5pclose_f (prp_id, hdferr) 
        -
        -            prp_id    IN: INTEGER(HID_T) 
        -            hdferr   OUT: INTEGER 
        -
        -

        -

          -
        • The prp_id parameter is the identifier of the property list - to terminate access to. -
        -
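      As a small illustration of how the query routines above fit together (assuming a dataset identifier dset_id already obtained with H5Dopen, and <stdio.h> included for printf), a C fragment might read:

          hid_t   cparms = H5Dget_create_plist(dset_id);   /* copy of the creation property list */
          hsize_t chunk_dims[2];

          if (H5Pget_layout(cparms) == H5D_CHUNKED)
          {
              /* Returns the chunk rank and fills in chunk_dims. */
              int chunk_rank = H5Pget_chunk(cparms, 2, chunk_dims);
              printf("chunk rank %d, dimensions %lu x %lu\n", chunk_rank,
                     (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
          }
          H5Pclose(cparms);                                /* release the copied list */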
      - - - - - - - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/fileorg.html b/doc/html/Tutor/fileorg.html deleted file mode 100644 index 9efe0e6..0000000 --- a/doc/html/Tutor/fileorg.html +++ /dev/null @@ -1,102 +0,0 @@ - -HDF5 File Organization - - - - - - - - - [ HDF5 Tutorial Top ] -

      -HDF5 File Organization -

      - - -
      - - -

      -An HDF5 file is a container for storing a variety of scientific data -and is composed of two primary types of objects: groups and datasets. -

        -
      • HDF5 group: a grouping structure containing zero or more HDF5 - objects, together with supporting metadata - -
      • HDF5 dataset: a multidimensional array of data elements, together - with supporting metadata -
      -Any HDF5 group or dataset may have an associated attribute list. An HDF5 -attribute is a user-defined HDF5 structure that provides extra information -about an HDF5 object. -

      -Working with groups and datasets is similar in many -ways to working with directories and files in UNIX. As with UNIX directories -and files, an HDF5 object in an HDF5 file is often referred to by its -full path name (also called an absolute path name). -

        - / signifies the root group.
        - /foo signifies a member of the root group called foo. -
        - /foo/zoo signifies a member of the group foo, which in - turn is a member of the root group. -
      -
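      To make the analogy concrete, a short C fragment (the file name and the zero size hints passed to H5Gcreate are only illustrative) could create the objects named above:

          hid_t file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

          /* "/foo" becomes a member of the root group ...                          */
          hid_t foo  = H5Gcreate(file, "/foo", 0);

          /* ... and "/foo/zoo" a member of the group foo (created here as a group,
             though it could just as well be a dataset).                            */
          hid_t zoo  = H5Gcreate(file, "/foo/zoo", 0);

          H5Gclose(zoo);
          H5Gclose(foo);
          H5Fclose(file);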

      - -

      - - - -


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/footer-ncsalogo.gif b/doc/html/Tutor/footer-ncsalogo.gif deleted file mode 100644 index 6c23ce8..0000000 Binary files a/doc/html/Tutor/footer-ncsalogo.gif and /dev/null differ diff --git a/doc/html/Tutor/glossary.html b/doc/html/Tutor/glossary.html deleted file mode 100644 index 6105150..0000000 --- a/doc/html/Tutor/glossary.html +++ /dev/null @@ -1,261 +0,0 @@ - -HDF5 Tutorial - Glossary - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Glossary -

      - -
      - - -
      -
      ATTRIBUTE -
      An HDF5 attribute is a small dataset that can be used to describe - the nature and/or the intended usage of the object it is attached - to. - -

      -

      BOOT BLOCK -
      HDF5 files are composed of a "boot block" describing information required to portably access files on multiple platforms, followed by information -about the groups in a file and the datasets in the file. The boot block contains information about the size of offsets and lengths of objects, the -number of entries in symbol tables (used to store groups) and additional version information for the file. -

      -

      DATASET -
      An HDF5 dataset is a multi-dimensional array of data elements, - together with supporting metadata. - -

      -

      DATASPACE -
      An HDF5 dataspace is an object that describes the dimensionality - of the data array. A dataspace is either a regular N-dimensional - array of data points, called a simple dataspace, or a more - general collection of data points organized in another - manner, called a complex dataspace. - -

      -

      DATATYPE -
      An HDF5 datatype is an object that describes the type of the - elements in an HDF5 multi-dimensional array. There are two - categories of datatypes: atomic and compound datatypes. An - atomic type is a type which cannot be decomposed into smaller - units at the API level. A compound type is a collection of one or - more atomic types or small arrays of such types. - -

      -

      DATASET CREATION PROPERTY LIST -
      The Dataset Creation Property List contains information on how - raw data is organized on disk and how the raw data is compressed. - The dataset API partitions these terms by layout, compression, - and external storage: -
        - Layout: -
          -
        • H5D_COMPACT: Data is small and can be stored in object header (not - implemented yet). This eliminates disk seek/read requests. -
        • H5D_CONTIGUOUS: (default) The data is large, non-extendible, - non-compressible, non-sparse, and can be stored - externally. -
        • H5D_CHUNKED: The data is large and can be extended in any dimension. - It is partitioned into chunks so each chunk is the same - logical size. -
        -Compression: (gzip compression)
        -External Storage Properties: The data must be contiguous to be stored - externally. It allows you to store the data - in one or more non-HDF5 files. - -
      - -

      -

      DATA TRANSFER PROPERTY LIST -
      The data transfer property list is used to control various aspects - of the I/O, such as caching hints or collective I/O information. -

      -

      DDL -
      DDL is a Data Description Language that describes HDF5 objects - in Backus-Naur Form. - -

      - -

      FILE ACCESS MODES -
      The file access modes determine whether an existing file will be -overwritten. All newly created files are opened for both reading and -writing. Possible values are: -
      -  H5F_ACC_RDWR:   Allow read and write access to file. 
      -  H5F_ACC_RDONLY: Allow read-only access to file. 
      -  H5F_ACC_TRUNC:  Truncate file, if it already exists, erasing all data 
      -                  previously stored in the file. 
      -  H5F_ACC_EXCL:   Fail if file already exists. 
      -  H5F_ACC_DEBUG:  Print debug information. 
      -  H5P_DEFAULT:    Apply default file access and creation properties. 
      -
      -
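      As a brief illustrative sketch (the file name is hypothetical), the C fragment
      below creates a file with H5F_ACC_TRUNC and then reopens it read-only with
      H5F_ACC_RDONLY:

        #include "hdf5.h"

        int main(void)
        {
            /* Create (or truncate) the file, then reopen it read-only. */
            hid_t file_id = H5Fcreate("modes.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
            H5Fclose(file_id);

            file_id = H5Fopen("modes.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
            H5Fclose(file_id);
            return 0;
        }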

      -

      FILE ACCESS PROPERTY LIST -
      File access property lists are used to control different methods - of performing I/O on files: -
        -Unbuffered I/O: Local permanent files can be accessed with the functions - described in Section 2 of the Posix manual, namely open(), lseek(), read(), - write(), and close().
        -Buffered I/O: Local permanent files can be accessed with the functions - declared in the stdio.h header file, namely fopen(), fseek(), fread(), - fwrite(), and fclose().
        -Memory I/O: Local temporary files can be created and accessed directly from - memory without ever creating permanent storage. The library uses malloc() - and free() to create storage space for the file.
        -Parallel Files using MPI I/O: This driver allows parallel access to a file - through the MPI I/O library. The parameters which can be modified are the - MPI communicator, the info object, and the access mode. The communicator - and info object are saved and then passed to MPI_File_open() during file - creation or open. The access_mode controls the kind of parallel access the - application intends.
        -Data Alignment: Sometimes file access is faster if certain things are aligned - on file blocks. This can be controlled by setting alignment properties of - a file access property list with the H5Pset_alignment() function. -
      -
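      As a short, non-authoritative sketch of the last item, the C fragment below
      creates a file access property list, asks for objects of 1 MB or more to be
      aligned on 4096-byte boundaries with H5Pset_alignment, and passes the list to
      H5Fcreate. The threshold, alignment, and file name are illustrative choices,
      not values prescribed by this glossary.

        #include "hdf5.h"

        int main(void)
        {
            hid_t fapl_id, file_id;

            fapl_id = H5Pcreate(H5P_FILE_ACCESS);
            /* Align every object of 1 MB or more on a 4096-byte boundary. */
            H5Pset_alignment(fapl_id, (hsize_t)1048576, (hsize_t)4096);

            file_id = H5Fcreate("aligned.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);

            H5Fclose(file_id);
            H5Pclose(fapl_id);
            return 0;
        }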

      -

      FILE CREATION PROPERTY LIST -
      The file creation property list is used to control the file - metadata. The parameters that can be modified are: -
        - User-Block Size: The "user-block" is a fixed length block of data located - at the beginning of the file which is ignored by the HDF5 library and may - be used to store any data information found to be useful to applications. -
        - Offset and Length Sizes: The number of bytes used to store the offset and - length of objects in the HDF5 file can be controlled with this parameter. -
        - Symbol Table Parameters: The size of symbol table B-trees can be controlled - by setting the 1/2 rank and 1/2 node size parameters of the B-tree. -
        - Indexed Storage Parameters: The size of indexed storage B-trees can be - controlled by setting the 1/2 rank and 1/2 node size parameters of the - B-tree. -
      -

      - -

      GROUP -
      A Group is a structure containing zero or more HDF5 objects, - together with supporting metadata. The two primary HDF5 objects - are datasets and groups. -

      - -

      HDF5 -
      HDF5 is an abbreviation for Hierarchical Data Format Version 5. - This file format is intended to make it easy to write and read - scientific data -

      -

        -
      • by including the information needed to understand the data - within the file -

        -

      • by providing a library of C, FORTRAN, and other language - programs that reduce the work required to provide efficient - writing and reading - even with parallel IO -
      -

      - -

      HDF5 FILE -
      An HDF5 file is a container for storing grouped collections - of multi-dimensional arrays containing scientific data. -

      - -

      H5DUMP -
      h5dump is an HDF5 tool that describes the HDF5 file contents in DDL. -

      - -

      HYPERSLAB -
      -A hyperslab is a portion of a dataset. A hyperslab selection can be a -logically contiguous collection of points in a dataspace, or it -can be a regular pattern of points or blocks in a dataspace. -

      -

      MOUNTING FILES -
      -HDF5 allows you to combine two or more HDF5 files in a manner similar -to mounting files in UNIX. The group structure and metadata -from one file appear as though they exist in another file. -

      - -

      NAMES -
      HDF5 object names are a slash-separated list of components. A name - which begins with a slash is an absolute name which is accessed - beginning with the root group of the file while all other relative - names are accessed beginning with the specified group. -

      -

      PARALLEL I/O (HDF5) -
      The parallel I/O version of HDF5 supports parallel file access using -MPI (Message Passing Interface). -

      - -

      REFERENCE -
      -OBJECT REFERENCE:
      - A reference to an entire object in the current HDF5 file. -

      - An object - reference points to an entire object in the current HDF5 file by storing - the relative file address (OID) of the object header for the object - pointed to. The relative file address of an object header is constant - for the life of the object. An object reference is of a fixed size in - the file. -

      -DATASET REGION REFERENCE:
      - Reference to a specific dataset region. -

      - A dataset region reference points to a region of a dataset in the - current HDF5 file by storing the OID of the dataset and the global - heap offset of the region referenced. The region referenced is - located by retrieving the coordinates of the areas in the region - from the global heap. A dataset region reference is of a variable - size in the file. -

      -

      THREADSAFE (HDF5) -
      A "thread-safe" version of HDF-5 (TSHDF5) is one that can be called from any thread of a multi-threaded program. Any calls to HDF -can be made in any order, and each individual HDF call will perform correctly. A calling program does not have to explicitly lock the HDF -library in order to do I/O. Applications programmers may assume that the TSHDF5 guarantees the following: -
        -
      • the HDF-5 library does not create or destroy threads. -
      • the HDF-5 library uses modest amounts of per-thread private memory. -
      • the HDF-5 library only locks/unlocks its own locks (no locks are passed in or returned from HDF), and the internal locking is guaranteed to be deadlock free. -
      -

      -These properties mean that the TSHDF5 library will not interfere with an application's use of threads. A TSHDF5 library is the same -library as the regular HDF-5 library, with additional code to synchronize access to the HDF-5 library's internal data structures. - -

      - - - -


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/img001.gif b/doc/html/Tutor/img001.gif deleted file mode 100644 index b79c6d6..0000000 Binary files a/doc/html/Tutor/img001.gif and /dev/null differ diff --git a/doc/html/Tutor/img002.gif b/doc/html/Tutor/img002.gif deleted file mode 100644 index 67585ef..0000000 Binary files a/doc/html/Tutor/img002.gif and /dev/null differ diff --git a/doc/html/Tutor/img003.gif b/doc/html/Tutor/img003.gif deleted file mode 100644 index ac1dcf9..0000000 Binary files a/doc/html/Tutor/img003.gif and /dev/null differ diff --git a/doc/html/Tutor/img004.gif b/doc/html/Tutor/img004.gif deleted file mode 100644 index d48dbab..0000000 Binary files a/doc/html/Tutor/img004.gif and /dev/null differ diff --git a/doc/html/Tutor/img005.gif b/doc/html/Tutor/img005.gif deleted file mode 100644 index 3383dc6..0000000 Binary files a/doc/html/Tutor/img005.gif and /dev/null differ diff --git a/doc/html/Tutor/index.html b/doc/html/Tutor/index.html deleted file mode 100644 index 15cfa5b..0000000 --- a/doc/html/Tutor/index.html +++ /dev/null @@ -1,29 +0,0 @@ - - -HDF5 Tutorial - - - - - - - - - - <b>HDF5 Tutorial</b> - <p> - If you are reading this message, your browser is not capable of - interpreting HTML frames. A no-frames version of the tutorial - is available by viewing the file <a href="title.html">title.html</a>. - <p> - If you would like to upgrade to a frames-capable browser, - we suggest upgrading to the most recent version of - Netscape Communicator, Microsoft Internet Explorer, or - an equivalent browser. - <p> - In the meantime, you can view this tutorial by starting with the - file <a href="title.html">title.html</a>. - - - - diff --git a/doc/html/Tutor/intro.html b/doc/html/Tutor/intro.html deleted file mode 100644 index 47285ae..0000000 --- a/doc/html/Tutor/intro.html +++ /dev/null @@ -1,92 +0,0 @@ - -HDF5 Tutorial - Introduction - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Introduction -

      - -
      - - -Welcome to the HDF5 Tutorial provided by the HDF User Support Group. -

      -HDF5 is a file format and library for storing scientific data. -It was designed and implemented - to meet growing and ever-changing scientific data-storage - and data-handling needs, - to take advantage of the power and features of today's - computing systems, and - to address the deficiencies of HDF4.x. -HDF5 has a powerful and flexible data model, - supports files larger than 2 GB (the limit of HDF4.x files), and - supports parallel I/O. -Thread-safety has been designed and will be implemented in the near future. -For a short overview of the HDF5 data model, library, and tools, see -the slide presentation at the following URL: -

      -   http://hdf.ncsa.uiuc.edu/HDF5/papers/HDF5_overview/index.htm
      -
      -This tutorial covers the basic HDF5 data objects and file structure, -the HDF5 programming model, and the API functions necessary for creating and -modifying data objects. It also introduces the available HDF5 tools for accessing -HDF5 files. -

      -The examples used in this tutorial, along with a Makefile to compile them, -can be found in ./examples/. You can also download -a tar -file with the examples and Makefile. -To use the Makefile, you may have to edit it and update the -compiler and compiler options, as well as the path for the HDF5 -binary distribution. -The Java examples can be found in -a subdirectory of the ./examples/ directory called java/. The java/ -directory contains a Makefile and shell scripts for running the java -programs. -

      -Please check the References for pointers to -other examples of HDF5 Programs. -

      -We hope that the step-by-step examples and instructions will give you a quick -start with HDF5. -

      -Please send your comments and suggestions to hdfhelp@ncsa.uiuc.edu. - - - - - - - -


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/iterate.html b/doc/html/Tutor/iterate.html deleted file mode 100644 index 3311509..0000000 --- a/doc/html/Tutor/iterate.html +++ /dev/null @@ -1,290 +0,0 @@ - -HDF5 Tutorial - Iterating over Group Members - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Iterating over Group Members -

      - -
      - - -

      Contents:

      - -
      - -

      How to Iterate over Group Members Using C

      -This section discusses how to find names and object types of HDF5 group -members using C. -

      -The HDF5 Group interface includes the H5Giterate function, -which iterates over the group members. -

      -Operations on each group member can be performed during the iteration process -by passing the operator function and its data to the iterator as parameters. -There are no restrictions on what kind of operations can be performed on -group members during the iteration procedure. -

      -The following steps are involved (a minimal C sketch appears after this list): -

        - -
      1. Write an operator function which will be used during the iteration process. - The HDF5 library defines the operator function signature and return values. -
      2. Open the group to iterate through. -
      3. Iterate through the group or just a few members of the group. -
      -
      -
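      The sketch below illustrates these three steps in C with the HDF5 1.6 API used
      in this tutorial. The operator function name (file_info), the file name, and the
      choice to start at the root group are assumptions of the sketch, not requirements;
      the tutorial's downloadable example may differ in detail.

        #include <stdio.h>
        #include "hdf5.h"

        /* Step 1: operator function, called once for each member of the group. */
        static herr_t file_info(hid_t group_id, const char *name, void *operator_data)
        {
            H5G_stat_t statbuf;

            /* Query the type of the object; do not follow symbolic links. */
            H5Gget_objinfo(group_id, name, 0, &statbuf);
            switch (statbuf.type) {
            case H5G_GROUP:   printf("Object with name %s is a group\n", name);          break;
            case H5G_DATASET: printf("Object with name %s is a dataset\n", name);        break;
            case H5G_TYPE:    printf("Object with name %s is a named datatype\n", name); break;
            default:          printf("Unable to identify object %s\n", name);            break;
            }
            return 0;   /* zero: continue with the remaining group members */
        }

        int main(void)
        {
            /* Step 2: open the file containing the group to iterate over. */
            hid_t file_id = H5Fopen("iterate.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

            /* Step 3: iterate over all members of the root group. */
            printf("Objects in the root group are:\n");
            H5Giterate(file_id, "/", NULL, file_info, NULL);

            H5Fclose(file_id);
            return 0;
        }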

      How to Iterate Over Group Members using FORTRAN

      -There is no FORTRAN call to iterate over group members. -Instead, this functionality is provided by two FORTRAN calls: -
        -
      • h5gn_members_f returns the number of group members. -
      • h5gget_obj_info_idx_f returns the name and type of the - group member, which is identified by its index. -
      -

      -

      Programming Example

      -
      -

      Description

      -In this example we iterate through the members of the root group. -
      -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. -

      -Following is the output from these examples: -

      -Output from C Example -

      -  Objects in the root group are:
      -
      -  Object with name Dataset1 is a dataset 
      -  Object with name Datatype1 is a named datatype 
      -  Object with name Group1 is a group 
      -
      -Output from FORTRAN Example -
      - Number of root group member is 1
      - MyGroup             1
      - Number of group MyGroup member  is 2
      - Group_A             1
      - dset1               2
      - Number of group MyGroup/Group_A member  is 1
      - dset2               2
      -
      - -

      Remarks for C Example

      -

      -

        -
      • The operator function in this example is called file_info. - The signature of the operator function is as follows: -
        -    herr_t (*H5G_operator_t) (hid_t group_id, const char* name, 
        -             void *operator_data)
        -
        -
          -
        • The group_id parameter is a group identifier for the - group being iterated over. - It is passed to the operator by the iterator function, - H5Giterate. -

          -

        • The name parameter is the name of the current object. - The name is passed to the operator function by the HDF5 library. -

          -

        • The operator_data parameter is the operator data. - It is passed to and from - the operator by the iterator, H5Giterate. -
        -

        - The operator function in this example simply prints the name and type - of the current object and then exits. - This information can also be used to open the object and perform - different operations or queries. For example a named datatype object's - name can be used to open the datatype and query its properties. -

        - The operator return value defines the behavior of the iterator. -

          -

          -

        • A zero return value causes the iterator to continue, returning - zero when all group members have been processed. -

          -

        • A positive value causes the iterator to immediately return that - value, indicating a short-circuit success. The iterator can be restarted - at the next group member. -

          -

        • A negative value causes the iterator to immediately return that value, - indicating failure. The iterator can be restarted at the next group - member. -
        -

        - In this example the operator function returns 0, which causes the iterator - to continue and go through all group members. -

        -

      • The function H5Gget_objinfo is used to determine the type of the object. - It also returns the modification time, number of hard links, and some - other information. -

        - The signature of this function is as follows: -

        -     herr_t H5Gget_objinfo (hid_t loc_id, const char * name, 
        -                            hbool_t follow_link, 
        -                            H5G_stat_t *statbuf)
        -
        -
          -
        • The loc_id and name arguments - specify the object by its location and name. - This example uses the group identifier and name relative to the group - to specify the object. -

          -

        • The follow_link argument is a flag which indicates - whether a symbolic link should be followed. A zero value indicates - that information should be returned for the link itself, but not - about the object it points to. -

          - The root group in this example does not have objects that are - links, so this flag is not important for our example. -

          -

        • The statbuf argument is the buffer in which to return - information. - Type information is returned into the field type of the - H5G_stat_t data structure (statbuf.type). - Valid values are - H5G_GROUP, H5G_DATASET, - H5G_TYPE, and H5G_LINK. -
        -

        -

      • The H5Giterate function has the following signature: -
        -    int H5Giterate (hid_t loc_id, const char *name , int *idx,
        -                    H5G_operator_t operator, void * operator_data) 
        -
        -
          -
        • The loc_id parameter is the group identifier for the - group being iterated over. -
        • The name parameter is the group name. -
        • The idx parameter is an index specifying that iteration - begins with the idx-th object in the group. - Upon the function's return, the index of the next element - to be processed is returned in idx. In our example, NULL is - used to start at the first group member. Since no stopping point - is returned in this case, the iterator cannot be restarted if one - of the calls to its operator returns a non-zero value. -
        • The operator parameter is the operator function. -
        • The operator_data argument is the operator data. - We used NULL since no data was passed to or from the operator. -
        -
      - -
      -

      Remarks for FORTRAN Example

      -

      -

        -
      • This program creates an HDF5 file with groups in it and - then uses h5gn_members_f to get the number of members in - each group and h5gget_obj_info_idx_f to obtain the group member's - name and type. -

        -

      • The number of members in a group is obtained with the -h5gn_members_f call: -
        -    h5gn_members_f (loc_id, name, nmembers, hdferr)
        -     
        -            loc_id    IN: INTEGER (HID_T) 
        -            name      IN: CHARACTER (LEN=*) 
        -            nmembers OUT: INTEGER
        -            hdferr   OUT: INTEGER 
        -
        -
          -
        • The loc_id parameter is the file or group identifier. -
        • The name parameter is the name of the group to obtain the number - of members in. -
        • The number of members in the group is returned in nmembers. -
        • The hdferr parameter contains the return code from the - call: 0 if successful and -1 otherwise. -
        -

        -

      • The name and type of each group member are obtained with the -h5gget_obj_info_idx_f call: -
        -    h5gget_obj_info_idx_f (loc_id, name, idx, &
        -                           obj_name, obj_type, hdferr)
        -
        -            loc_id     IN: INTEGER (HID_T)
        -            name       IN: CHARACTER (LEN=*)
        -            idx        IN: INTEGER
        -            obj_name  OUT: CHARACTER (LEN=*)
        -            obj_type  OUT: INTEGER
        -            hdferr    OUT: INTEGER
        - 
        -
          -
        • The loc_id parameter is the file or group identifier. -
        • The name parameter is the name of the group. -
        • The idx parameter is the index of the member object. -
        • The obj_name parameter is the name of the object that gets returned. -
        • The obj_type parameter is the object type that gets returned. - Valid values are as follows: -
          -        H5G_LINK_F 
          -        H5G_GROUP_F 
          -        H5G_DATASET_F 
          -        H5G_TYPE_F 
          -
          -
        • The hdferr parameter contains the return code from the - call: 0 if successful and -1 otherwise. -
        -
      - - - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/mount.html b/doc/html/Tutor/mount.html deleted file mode 100644 index 1094231..0000000 --- a/doc/html/Tutor/mount.html +++ /dev/null @@ -1,255 +0,0 @@ - -HDF5 Tutorial - Mounting Files - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Mounting Files -

      - -
      - - -

      Contents:

      - -
      - -

      Mounting Files

      - -HDF5 allows you to combine two or more HDF5 files in memory -in a manner similar to mounting files in UNIX. -The group structure and metadata from one file appear as though -they exist in another file. The following steps are involved: -
        -
      1. Open the files. - -
      2. Choose the mount point in the first file - (the parent file). The mount point in - HDF5 is a group, which CANNOT be the root group. - -
      3. Use the HDF5 routine H5Fmount / h5fmount_f - to mount the second file (the child file) in the first file. - -
      4. Work with the objects in the second file as if they were members of - the mount point group in the first file. The previous contents of - the mount point group are temporarily hidden. - -
      5. Unmount the second file using H5Funmount / - h5funmount_f when the work is done. -
      - -

      Programming Example

      -
      -

      Description

      - -In the following example, we create one file containing a group and -another file containing a dataset. -Mounting is used to access the dataset from the second -file as a member of a group in the first file. -The following figures illustrate this concept. -
      -
      -             FILE1                                   FILE2
      -  
      -      --------------------                   --------------------
      -      !                  !                   !                  !
      -      !      /           !                   !       /          !
      -      !       |          !                   !        |         !
      -      !       |          !                   !        |         !
      -      !       V          !                   !        V         !
      -      !     --------     !                   !     ----------   !
      -      !     ! Group !    !                   !     ! Dataset!   !
      -      !     ---------    !                   !     ----------   !
      -      !------------------!                   !------------------! 
      -
      -After mounting FILE2 under the group in FILE1, -the parent file has the following structure: -
      - 
      -                                FILE1                                 
      -  
      -                         --------------------                   
      -                         !                  !                  
      -                         !      /           !               
      -                         !       |          !            
      -                         !       |          !         
      -                         !       V          !    
      -                         !     --------     !              
      -                         !     ! Group !    !            
      -                         !     ---------    !           
      -                         !         |        !
      -                         !         |        !
      -                         !         V        !
      -                         !    -----------   !
      -                         !    ! Dataset !   !
      -                         !    !----------   !
      -                         !                  !
      -                         !------------------!                    
      -
      -
      -[
      C program ] - - h5_mount.c
      -[ FORTRAN program ] - - mountexample.f90 -

      - -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. - - -

      Remarks

      -
        -
      • The first part of the program creates a group in one file and creates - and writes a dataset to another file. -

        -

      • Both files are reopened and the second file is mounted in the first - using H5Fmount / h5fmount_f. - If no objects will be modified, the - files can be opened with H5F_ACC_RDONLY - (H5F_ACC_RDONLY_F in FORTRAN). - If the data is to be modified, the files should be opened with - H5F_ACC_RDWR (H5F_ACC_RDWR_F in FORTRAN). -

        -C: -

        -    herr_t H5Fmount (hid_t loc_id, const char *dsetname, 
        -                     hid_t file_id, hid_t access_prp)  
        -
        -

        -FORTRAN: -

        -    h5fmount_f (loc_id, dsetname, file_id, hdferr, access_prp)
        -
        -            loc_id       IN: INTEGER (HID_T) 
        -            dsetname     IN: CHARACTER (LEN=*)
        -            file_id      IN:  INTEGER (HID_T)
        -            hdferr      OUT: INTEGER
        -            access_prp   IN: INTEGER (HID_T), OPTIONAL
        -                         (Default value: H5P_DEFAULT_F)  
        -
        -

        -

          -
        • The loc_id and dsetname arguments - specify the location of the mount point. - In this example, the mount point is a group /G in the - specified file. Since the group /G is in the root - group of the first file, one can also use just G to - identify it. -

          - Below is a description of another scenario: -

          - Suppose the group G were a member of - the group H in the first file. - Then the mount point G can be specified in - two different ways: -

          -

            -
          • loc_id is the file identifier for the first file.
            - dsetname is H/G. -

            -

          • loc_id is the identifier for the group H.
            - dsetname is G. -
          -

          -

        • The file_id argument is the identifier for the file - which will be mounted. - Only one file can be mounted per mount point. -

          -

        • The access_prp argument is the identifier for the property list - to be used. Currently, only the default property list, - H5P_DEFAULT, can be used in C. - In FORTRAN, this argument can be omitted or - H5P_DEFAULT_F can be used. -

          -

        • The C function H5Fmount returns a non-negative - value if successful and a negative value otherwise. - With the FORTRAN routine, h5fmount_f, - the return value of the call is returned in hdferr: - 0 if successful and -1 otherwise. -
        -

        -

      • In this example, we only read data from the dataset D. - One can also modify data. - If the dataset is modified while the file is mounted, it is - modified in the original file after the file is unmounted. -

        -

      • The file is unmounted with H5Funmount / -h5funmount_f: -

        -C: -

        -    herr_t H5Funmount (hid_t loc_id, const char *dsetname)
        -
        -

        -FORTRAN: -

        -    h5funmount_f (loc_id, dsetname, hdferr)
        -
        -            loc_id     IN: INTEGER (HID_T)
        -            dsetname   IN: CHARACTER (LEN=*)
        -            hdferr    OUT: INTEGER
        -
        -

        -

          -
        • The loc_id and dsetname arguments specify the location - of the mount point. - In our example loc_id is the first file's file identifier - and dsetname is the name of group /G. -
        -

        -

      • Note that H5Funmount / h5funmount_f - does not close files. Files are closed with the respective calls to - the H5Fclose / h5fclose_f function. -

        -

      • Closing the parent file automatically unmounts the child file. -

        -

      • The h5dump utility cannot display files in memory. - Therefore, no output of FILE1 after FILE2 - was mounted is provided. -
      - - - - - -
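      Drawing the remarks above together, the following condensed C sketch (error
      checking omitted; the file names follow the figures above, the mount point is
      the group /G, and the dataset in the child file is D) mounts the second file,
      opens the dataset through the mount point, and then unmounts and closes
      everything.

        #include "hdf5.h"

        int main(void)
        {
            hid_t file1_id, file2_id, dset_id;

            /* Reopen both files; read-only access is sufficient if nothing is modified. */
            file1_id = H5Fopen("FILE1.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
            file2_id = H5Fopen("FILE2.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

            /* Mount the second file at the group /G of the first file. */
            H5Fmount(file1_id, "/G", file2_id, H5P_DEFAULT);

            /* The dataset D of FILE2 is now visible as /G/D in FILE1. */
            dset_id = H5Dopen(file1_id, "/G/D");
            /* ... read the data here ... */
            H5Dclose(dset_id);

            /* Unmount the child file; this does not close either file. */
            H5Funmount(file1_id, "/G");

            H5Fclose(file2_id);
            H5Fclose(file1_id);
            return 0;
        }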


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/property.html b/doc/html/Tutor/property.html deleted file mode 100644 index 13035f2..0000000 --- a/doc/html/Tutor/property.html +++ /dev/null @@ -1,167 +0,0 @@ - -HDF5 Tutorial - Property Lists - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Property Lists -

      - -
      - - - -

      -The property list interface provides a mechanism for adding functionality -to HDF5 calls, without increasing the number of arguments used -for a given call. -

      -A property list is a collection of values which can -be passed to various HDF5 functions to control features that -are typically unimportant or whose default values are usually used -(by specifying H5P_DEFAULT / H5P_DEFAULT_F). -

      -Property lists are used in the less common cases when these default settings need to be changed. - -

      - - - -

      Creating Files

      -The File Creation property list, H5P_FILE_CREATE, applies to H5Fcreate() -only and is used to control the file metadata which is maintained in the -super block of the file. The parameters that can be modified are: -user-block size, offset and length sizes, symbol table parameters, -and index storage parameters. -

      -The following example shows how to create a file with 64-bit object -offsets and lengths: -

      -        hid_t create_plist;
      -        hid_t file_id;
      -
      -        create_plist = H5Pcreate(H5P_FILE_CREATE);
      -        H5Pset_sizes(create_plist, 8, 8);
      -
      -        file_id = H5Fcreate("test.h5", H5F_ACC_TRUNC,
      -                             create_plist, H5P_DEFAULT);
      -        .
      -        .
      -        .
      -        H5Fclose(file_id);
      -
      - -
      -

      Accessing Files

      -The File Access property list, H5P_FILE_ACCESS, applies to H5Fcreate() and -H5Fopen() and is used to control different methods of -performing I/O on files. The different types of I/O are: unbuffered I/O, -buffered I/O, memory I/O, parallel files using MPI I/O, and data alignment. -

      -Following is an example of using the H5P_FILE_ACCESS property list for creating -HDF5 files with the metadata and data split into different files: -
            -[
      C program ] - - h5split.c
      -

      - - -
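      The h5split.c example itself is not reproduced here. As a rough, non-authoritative
      sketch of the same idea, the fragment below creates a file access property list,
      selects the split file driver with H5Pset_fapl_split (the file name and the
      .meta/.raw extensions are illustrative choices), and passes the list to H5Fcreate
      so that metadata and raw data go to separate files.

        #include "hdf5.h"

        int main(void)
        {
            hid_t fapl_id, file_id;

            fapl_id = H5Pcreate(H5P_FILE_ACCESS);
            /* Metadata is written to split.h5.meta, raw data to split.h5.raw. */
            H5Pset_fapl_split(fapl_id, ".meta", H5P_DEFAULT, ".raw", H5P_DEFAULT);

            file_id = H5Fcreate("split.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);

            H5Fclose(file_id);
            H5Pclose(fapl_id);
            return 0;
        }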

      Creating Datasets

      -The Dataset Creation property list, H5P_DATASET_CREATE, applies to -H5Dcreate() and controls information on how raw data -is organized on disk and how the raw data is compressed. The dataset API -partitions these terms by layout, compression, and external storage: -

      -
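      For example, a chunked, gzip-compressed dataset can be created with a sketch
      along these lines (the dimensions, chunk sizes, names, and compression level
      are illustrative assumptions):

        #include "hdf5.h"

        int main(void)
        {
            hsize_t dims[2]  = {100, 200};
            hsize_t chunk[2] = {10, 20};
            hid_t   file_id, space_id, dcpl_id, dset_id;

            file_id  = H5Fcreate("chunked.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
            space_id = H5Screate_simple(2, dims, NULL);

            dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
            H5Pset_chunk(dcpl_id, 2, chunk);       /* H5D_CHUNKED layout */
            H5Pset_deflate(dcpl_id, 6);            /* gzip compression, level 6 */

            dset_id = H5Dcreate(file_id, "/dset", H5T_NATIVE_INT, space_id, dcpl_id);

            H5Dclose(dset_id);
            H5Pclose(dcpl_id);
            H5Sclose(space_id);
            H5Fclose(file_id);
            return 0;
        }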

      - - -

      Reading or Writing Data

      - -The Data Transfer property list, H5P_DATASET_XFER, is used to control -various aspects of I/O, such as caching hints or collective I/O information. -

      -The following code sets the maximum size for the type conversion buffer -and background buffer: -

      -   plist_xfer = H5Pcreate (H5P_DATASET_XFER);
      -   H5Pset_buffer(plist_xfer, (hsize_t)NX*NY*NZ, NULL, NULL);
      -   status = H5Dread (dataset, H5T_NATIVE_UCHAR, memspace, dataspace,
      -                      plist_xfer);
      -
      - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/questions.html b/doc/html/Tutor/questions.html deleted file mode 100644 index d0d3b51..0000000 --- a/doc/html/Tutor/questions.html +++ /dev/null @@ -1,159 +0,0 @@ - -HDF5 Tutorial - Introductory Topics Quiz - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Introductory Topics Quiz -

      - -
      - - -

      Section 2: HDF File Organization

      - -
        -
      1. Name and describe the two primary objects that can be stored in an HDF5 - file. - -

        -

      2. What is an attribute? - -

        -

      3. Give the path name for an object called harry that is a member of a - group called dick, which, in turn, is a member of the root group. -
      - - -

      Section 3: The HDF5 API

      - -
        -
      1. Describe the purpose of each of the following HDF5 APIs: - - H5A, H5D, H5E, H5F, H5G, H5T, H5Z - -
      - - -

      Section 4: Creating an HDF5 File

      - -
        -
      1. What two HDF5 routines must be called to create an HDF5 file? - -

        -

      2. What include file must be included in any file that uses the HDF5 library? - -

        -

      3. An HDF5 file is never completely empty because as soon as it is created, - it automatically contains a certain primary object. What is that object? -
      - - -

      Section 5: Creating a Dataset

      - -
        -
      1. Name and describe two major datatype categories. - -

        -

      2. List the HDF5 atomic datatypes. Give an example of a predefined datatype. - -

        -

      3. What does the dataspace describe? What are the major characteristics of - the simple dataspace? - -

        -

      4. What information needs to be passed to the H5Dcreate - function, i.e., what information is needed to describe a dataset at - creation time? -
      - - -

      Section 6: Reading from and Writing to a Dataset

      - -
        -
      1. What are six pieces of information which need to be specified for - reading and writing a dataset? - -

        -

      2. Why are both the memory dataspace and file dataspace needed for - read/write operations, while only the memory datatype is required? - -

        -

      3. What does the line -
            - DATASPACE { SIMPLE ( 4, 6 ) / ( 4, 6 ) } -
        in Figure 6.1 mean? -
      - - -

      Section 7: Creating an Attribute

      - -
        -
      1. What is an attribute? - -

        -

      2. Can partial I/O operations be performed on attributes? -
      - - -

      Section 8: Creating a Group

      - -
        -
      1. What are the two primary objects that can be included in a group? -
      - - -

      Section 9: Creating Groups Using Absolute and Relative Names

      - -
        -
      1. Group names can be specified in two ways. What are these two types - of group names? - -

        -

      2. You have a dataset named moo in the group boo, which is - in the group foo, which, in turn, is in the root group. - How would you specify an absolute name to access this dataset? -
      - - -

      Section 10: Creating Datasets in Groups

      - -
        -
      1. Describe a way to access the dataset moo described in the -previous section (Section 9, question 2) using a relative name. -Describe a way to access the same dataset using an absolute name. -
      - - - - -


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/rdwt.html b/doc/html/Tutor/rdwt.html deleted file mode 100644 index 391a0bc..0000000 --- a/doc/html/Tutor/rdwt.html +++ /dev/null @@ -1,409 +0,0 @@ - -HDF5 Tutorial - Reading from and Writing to a Dataset - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Reading from and Writing to a Dataset -

      - -
      - - -

      Contents:

      - -
      - -

      Reading from and Writing to a Dataset

      -

      -During a dataset I/O operation, the library transfers raw data between memory -and the file. The data in memory can have a datatype different from that of -the file and can also be of a different size -(i.e., the data in memory is a subset of the dataset elements, or vice versa). -Therefore, to perform read or write operations, the application -program must specify: -

        -
      • The dataset - -
      • The dataset's datatype in memory - -
      • The dataset's dataspace in memory - -
      • The dataset's dataspace in the file - -
      • The dataset transfer property list - (The dataset transfer property list controls various aspects of the - I/O operations, such as the number of processes participating in a - collective I/O request or hints to the library to control caching of - raw data. In this tutorial, we use the default dataset transfer - property list.) - -
      • The data buffer -
      - - -

      -The steps to read from or write to a dataset are -as follows: -

        -
      1. Obtain the dataset identifier. -
      2. Specify the memory datatype. -
      3. Specify the memory dataspace. -
      4. Specify the file dataspace. -
      5. Specify the transfer properties. -
      6. Perform the desired operation on the dataset. -
      7. Close the dataset. -
      8. Close the dataspace, datatype, and property list if necessary. -
      - -To read from or write to a dataset, -the H5Dread/h5dread_f and -H5Dwrite/h5dwrite_f -routines are used.

      -C: -

      -   status = H5Dread (set_id, mem_type_id, mem_space_id, file_space_id,
      -                     xfer_prp, buf );
      -   status = H5Dwrite (set_id, mem_type_id, mem_space_id, file_space_id,
      -                     xfer_prp, buf);
      -
      -
      -FORTRAN: -
      -   CALL h5dread_f(dset_id, mem_type_id, buf, error, &
      -                     mem_space_id=mspace_id, file_space_id=fspace_id, &
      -                     xfer_prp=xfer_plist_id)
      -        or
      -   CALL h5dread_f(dset_id, mem_type_id, buf, error)
      -
      -
      -   CALL h5dwrite_f(dset_id, mem_type_id, buf, error, &
      -                     mem_space_id=mspace_id, file_space_id=fspace_id, &
      -                     xfer_prp=xfer_plist_id)
      -        or
      -   CALL h5dwrite_f(dset_id, mem_type_id, buf, error)
      -
      - - -

      -

      Programming Example

      -
      -

      Description

      -The following example shows how to read and write an existing dataset. -It opens the file created in the previous example, obtains the dataset -identifier for the dataset /dset, -writes data to the dataset, then reads the data back from the file into -memory. It then closes the dataset and file. A condensed C sketch of this -sequence appears after the note below.
      -
      - -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. - - - -
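      A condensed C sketch of the sequence described above, under stated assumptions
      (the file dset.h5 with the 4x6 integer dataset /dset from the previous section,
      whole-dataset I/O, and the default dataspaces and transfer property list):

        #include "hdf5.h"

        int main(void)
        {
            int    data[4][6], i, j;
            hid_t  file_id, dset_id;
            herr_t status;

            /* Initialize the buffer to be written. */
            for (i = 0; i < 4; i++)
                for (j = 0; j < 6; j++)
                    data[i][j] = i * 6 + j + 1;

            /* Step 1: open the file and obtain the dataset identifier. */
            file_id = H5Fopen("dset.h5", H5F_ACC_RDWR, H5P_DEFAULT);
            dset_id = H5Dopen(file_id, "/dset");

            /* Steps 2-6: write, then read back, the whole dataset; the memory
             * datatype is H5T_NATIVE_INT and the default dataspaces and
             * transfer property list are used. */
            status = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
                              H5P_DEFAULT, data);
            status = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
                             H5P_DEFAULT, data);

            /* Steps 7-8: close the dataset and the file. */
            H5Dclose(dset_id);
            H5Fclose(file_id);
            return (status < 0) ? 1 : 0;
        }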

      Remarks

      -
        -
      • H5Fopen/h5fopen_f opens an existing file and - returns a file identifier. -
        -C:
        -  hid_t H5Fopen (const char *name, unsigned access_mode, hid_t access_prp) 
        -
        -FORTRAN:
        -  h5fopen_f (name, access_mode, file_id, hdferr, access_prp)
        -
        -        name         CHARACTER(LEN=*)
        -        access_mode  INTEGER
        -                     (Possible values: H5F_ACC_RDWR_F, H5F_ACC_RDONLY_F)
        -        file_id      INTEGER(HID_T)
        -        hdferr       INTEGER
        -                     (Possible values: 0 on success and -1 on failure)
        -        access_prp   INTEGER(HID_T), OPTIONAL
        -
        -
        -
          -
        • The argument name is the filename. -

          -

        • The access_mode parameter is the file access mode. - H5F_ACC_RDWR in C - (H5F_ACC_RDWR_F in FORTRAN) - allows read/write access - while H5F_ACC_RDONLY in C - (H5F_ACC_RDONLY_F in FORTRAN) - allows read-only access. - -

          -

        • The access_prp parameter identifies the file access property list. - H5P_DEFAULT in C and H5P_DEFAULT_F in FORTRAN - specify the default file access property list. - This parameter is optional in FORTRAN; if it is omitted, the default file - access property list is used. - -

          -

        • In FORTRAN, the return code is passed back in the hdferr - parameter: 0 if successful, -1 if not. In C, the function returns - the file identifier if successful, and a negative value otherwise. -
        -

        -

      • H5Dopen/h5dopen_f opens an existing dataset - with the name specified by name at the location specified by - loc_id. - For FORTRAN, the return value is passed in the hdferr parameter: - 0 if successful, -1 if not. For C, the function returns the dataset - identifier if successful, and a negative value if not. -

        -C: -

        -  hid_t H5Dopen (hid_t loc_id, const char *name) 
        -
        -FORTRAN: -
        -  h5dopen_f(loc_id, name, hdferr) 
        -
        -        loc_id   INTEGER(HID_T) 
        -        name     CHARACTER(LEN=*) 
        -        hdferr   INTEGER 
        -                 (Possible values: 0 on success and -1 on failure)
        -
        - -

        -

      • H5Dwrite/h5dwrite_f writes raw data - from an application buffer to the specified - dataset, converting from the datatype and dataspace of the dataset in - memory to the datatype and dataspace of the dataset in the file. -

        -C: -

        -  herr_t H5Dwrite (hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, 
        -                   hid_t file_space_id, hid_t xfer_prp, const void * buf) 
        -
        -FORTRAN: -
        -  h5dwrite_f (dset_id, mem_type_id, buf, hdferr, mem_space_id, &
        -                                       file_space_id, xfer_prp)
        -
        -        dset_id        INTEGER(HID_T)
        -        mem_type_id    INTEGER(HID_T)
        -        buf(*,...*)    TYPE
        -        hdferr         INTEGER 
        -                       (Possible values: 0 on success and -1 on failure)
        -        mem_space_id   INTEGER(HID_T), OPTIONAL
        -                       (Default value: H5S_ALL_F)
        -        file_space_id  INTEGER(HID_T), OPTIONAL
        -                       (Default value: H5S_ALL_F)
        -        xfer_prp       INTEGER(HID_T), OPTIONAL
        -                       (Default value: H5P_DEFAULT_F)
        -
        -
          -
        • The dset_id is the dataset identifier. -

          - -

        • The mem_type_id parameter is the identifier of the dataset's - memory datatype. H5T_NATIVE_INT in C - (H5T_NATIVE_INTEGER in FORTRAN) is an integer datatype - for the machine on which the library was compiled. -

          - -

        • The mem_space_id parameter is the identifier of the dataset's - memory dataspace. H5S_ALL in C (H5S_ALL_F - in FORTRAN) is the default value and indicates that the whole dataspace - in memory is selected for the I/O operation. - This parameter is optional in FORTRAN; if it is omitted, the default - will be used. -

          - -

        • The file_space_id parameter is the identifier of the - dataset's file dataspace. - H5S_ALL in C (H5S_ALL_F in FORTRAN) - is the default value and indicates that the entire dataspace of - the dataset in the file is selected for the I/O operation. - This parameter is optional in FORTRAN; if it is omitted, the default - will be used. -

          - -

        • The xfer_prp parameter is the data transfer property list - identifier. - H5P_DEFAULT in C - (H5P_DEFAULT_F in FORTRAN) is the default value and - indicates that the default data transfer property list is used. - This parameter is optional in FORTRAN; if it is omitted, the default - will be used. -

          - -

        • The buf parameter is the data buffer to write. -

          - -

        • In FORTRAN, the hdferr parameter is for the error code - passed back: 0 if successful, -1 if not. In C, this function - returns a non-negative value if successful; otherwise it returns - a negative value. -
        -

        -

      • H5Dread/h5dread_f reads raw data from the - specified dataset to an application buffer, - converting from the file datatype and dataspace to the memory datatype and - dataspace. -

        -C: -

        -  herr_t H5Dread (hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, 
        -                  hid_t file_space_id, hid_t xfer_prp, void * buf) 
        -
        -FORTRAN: -
        -  h5dread_f (dset_id, mem_type_id, buf, hdferr, mem_space_id, &
        -                  file_space_id, xfer_prp)
        -
        -        dset_id        INTEGER(HID_T)
        -        mem_type_id    INTEGER(HID_T)
        -        buf(*,...*)    TYPE
        -        hdferr         INTEGER 
        -                       (Possible values: 0 on success and -1 on failure)
        -        mem_space_id   INTEGER(HID_T), OPTIONAL
        -                       (Default value: H5S_ALL_F)
        -        file_space_id  INTEGER(HID_T), OPTIONAL
        -                       (Default value: H5S_ALL_F)
        -        xfer_prp       INTEGER(HID_T), OPTIONAL
        -                       (Default value: H5P_DEFAULT_F)
        -
        -
        - -

        -

          -
        • The dset_id parameter is the dataset identifier. -

          - -

        • The mem_type_id parameter is the identifier of the dataset's - memory datatype. H5T_NATIVE_INT in C - (H5T_NATIVE_INTEGER in FORTRAN) is an integer datatype - for the machine on which the library was compiled. -

          - -

        • The mem_space_id parameter is the identifier of the dataset's - memory dataspace. H5S_ALL in C (H5S_ALL_F - in FORTRAN) is the default value and indicates that the whole dataspace - in memory is selected for the I/O operation. - This parameter is optional in FORTRAN; if it is omitted, the default - will be used. -

          - -

        • The file_space_id parameter is the identifier of the - dataset's file dataspace. - H5S_ALL in C (H5S_ALL_F in FORTRAN) - is the default value and indicates that the entire dataspace of - the dataset in the file is selected for the I/O operation. - This parameter is optional in FORTRAN; if it is omitted, the default - will be used. - -

          -

        • The xfer_prp parameter is the data transfer property list - identifier. - H5P_DEFAULT in C - (H5P_DEFAULT_F in FORTRAN) is the default value and - indicates that the default data transfer property list is used. - This parameter is optional in FORTRAN; if it is omitted, the default - will be used. -

          - -

        • The buf parameter is the data buffer to read into. -

          - -

        • In FORTRAN, the hdferr parameter is for the error code - passed back: 0 if successful, -1 if not. In C, this function - returns a non-negative value if successful; otherwise it returns - a negative value. -
        -
      -
      -

      File Contents

      -Figure 6.1a shows the contents of dset.h5 (created by the C program). -
      -Figure 6.1b shows the contents of dsetf.h5 (created by the FORTRAN -program). -

      - Fig. 6.1a   dset.h5 in DDL -

      -      HDF5 "dset.h5" {
      -      GROUP "/" {
      -         DATASET "dset" {
      -            DATATYPE { H5T_STD_I32BE }
      -            DATASPACE { SIMPLE ( 4, 6 ) / ( 4, 6 ) }
      -            DATA {
      -               1, 2, 3, 4, 5, 6,
      -               7, 8, 9, 10, 11, 12,
      -               13, 14, 15, 16, 17, 18,
      -               19, 20, 21, 22, 23, 24
      -            }
      -         }
      -      }
      -      }
      -
      -

      - Fig. 6.1b   dsetf.h5 in DDL -

      -HDF5 "dsetf.h5" {
      -GROUP "/" {
      -   DATASET "dset" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 6, 4 ) / ( 6, 4 ) }
      -      DATA {
      -         1, 7, 13, 19,
      -         2, 8, 14, 20,
      -         3, 9, 15, 21,
      -         4, 10, 16, 22,
      -         5, 11, 17, 23,
      -         6, 12, 18, 24
      -      }
      -   }
      -}
      -}
      -
      - - - - - - -


      - -
      NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/references.html b/doc/html/Tutor/references.html deleted file mode 100644 index 3787e88..0000000 --- a/doc/html/Tutor/references.html +++ /dev/null @@ -1,66 +0,0 @@ - -HDF5 Tutorial - References - - - - - - - - - [ HDF5 Tutorial Top ] -

      -References -

      - -
      - - - - - - - -


      - - NCSA
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign
      -
      - - -hdfhelp@ncsa.uiuc.edu -
      -
      Last Modified: June 22, 2001

      - -
      -
      - - - - - - - diff --git a/doc/html/Tutor/reftoobj.html b/doc/html/Tutor/reftoobj.html deleted file mode 100644 index ad996d1..0000000 --- a/doc/html/Tutor/reftoobj.html +++ /dev/null @@ -1,318 +0,0 @@ - -HDF5 Tutorial - References to Objects - - - - - - - - - [ HDF5 Tutorial Top ] -

      - References to Objects -

      - -
      - - -

      Contents:

      - -
      - -

      References to Objects

      -In HDF5, objects (i.e. groups, datasets, and named datatypes) are usually -accessed by name. This access method was discussed in previous sections. -There is another way to access stored objects - by reference. -

      -An object reference is based on the relative file address of the object header -in the file and is constant for the life of the object. Once a reference to -an object is created and stored in a dataset in the file, it can be used -to dereference the object it points to. References are handy for creating -a file index or for grouping related objects by storing references to them in -one dataset. -

      - -

      Creating and Storing References to Objects

      -The following steps are involved in creating and storing file references -to objects: -
        -
      1. Create the objects or open them if they already exist in the file. -

        -

      2. Create a dataset to store references to the objects. -

        -

      3. Create and store references to the objects in a buffer. -

        -

      4. Write the buffer containing the references to the dataset. -
      - -
      -
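      An abbreviated C sketch of these four steps follows; the file, group, and
      dataset names are illustrative and error checking is omitted.

        #include "hdf5.h"

        int main(void)
        {
            hobj_ref_t ref[2];                       /* buffer of object references */
            hsize_t    dims[1] = {2};
            int        data[2] = {1, 2};
            hid_t      file_id, grp_id, space_id, idset_id, refdset_id;

            file_id = H5Fcreate("refs.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

            /* Step 1: create the objects that will be referenced. */
            grp_id   = H5Gcreate(file_id, "/GROUP1", 0);
            space_id = H5Screate_simple(1, dims, NULL);
            idset_id = H5Dcreate(file_id, "/INTEGERS", H5T_NATIVE_INT, space_id, H5P_DEFAULT);
            H5Dwrite(idset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

            /* Step 2: create a dataset that will hold the references. */
            refdset_id = H5Dcreate(file_id, "/OBJECT_REFERENCES",
                                   H5T_STD_REF_OBJ, space_id, H5P_DEFAULT);

            /* Step 3: create references to the group and the dataset in a buffer. */
            H5Rcreate(&ref[0], file_id, "/GROUP1",   H5R_OBJECT, -1);
            H5Rcreate(&ref[1], file_id, "/INTEGERS", H5R_OBJECT, -1);

            /* Step 4: write the buffer of references to the dataset. */
            H5Dwrite(refdset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref);

            H5Dclose(refdset_id);
            H5Dclose(idset_id);
            H5Sclose(space_id);
            H5Gclose(grp_id);
            H5Fclose(file_id);
            return 0;
        }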

      Reading References and Accessing Objects Using References

      - -The following steps are involved in reading references to objects and -accessing objects using references: -
        -
      1. Open the dataset with the references and read them. The -H5T_STD_REF_OBJ - datatype must be used to describe the memory datatype. -

        -

      2. Use the read reference to obtain the identifier of the object the - reference points to. -

        -

      3. Open the dereferenced object and perform the desired operations. -

        -

      4. Close all objects when the task is complete. -
      - -
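      The corresponding read-and-dereference side, again as an illustrative sketch
      continuing from the hypothetical file written above:

        #include "hdf5.h"

        int main(void)
        {
            hobj_ref_t ref[2];
            hid_t      file_id, refdset_id, obj_id;

            /* Step 1: open the reference dataset and read it with H5T_STD_REF_OBJ. */
            file_id    = H5Fopen("refs.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
            refdset_id = H5Dopen(file_id, "/OBJECT_REFERENCES");
            H5Dread(refdset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref);

            /* Steps 2-3: dereference the second reference; in this sketch it is
             * known to point to a dataset, so the identifier can be used with
             * the H5D interface. */
            obj_id = H5Rdereference(refdset_id, H5R_OBJECT, &ref[1]);
            /* ... query or read the dereferenced dataset here ... */

            /* Step 4: close all objects. */
            H5Dclose(obj_id);
            H5Dclose(refdset_id);
            H5Fclose(file_id);
            return 0;
        }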

      Programming Example

      -
      -

      Description

      -The example below first creates a group in the file. -It then creates two datasets and a named datatype in that group. -References to these four objects are stored in a dataset in the root group. -

      -After that, it opens and reads the reference dataset from the file created -previously, then dereferences the references. - -

      -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. -

      -Following is the output from the examples: -

      -  Data has been successfully written to the dataset 
      -  Stored datatype is of a FLOAT class
      -
      - - - -

      Remarks

      - -
        -
      • The following code creates a dataset in which to store the references. -

        -C:

        -    dset2_id = H5Dcreate (file_id, dsetname, H5T_STD_REF_OBJ, 
        -                         space_id, H5P_DEFAULT);
        -
        -

        -FORTRAN:

        -    CALL h5dcreate_f (file_id, dsetname, H5T_STD_REF_OBJ, & 
        -                      space_id, dset2_id, hdferr)
        -
        -

        - Notice that the H5T_STD_REF_OBJ - datatype is used to specify that references to objects will be - stored. The datatype H5T_STD_REF_DSETREG is - used to store the dataset - region references and will be discussed later in this tutorial. -

        -

      • The next few calls to H5Rcreate / h5rcreate_f - create references to the objects. The signature of - H5Rcreate / h5rcreate_f is as follows: -

        -C:  

        -    herr_t H5Rcreate (void* ref, hid_t loc_id, const char *name, 
        -                      H5R_type_t ref_type, hid_t space_id)    
        -
        -

        -FORTRAN:  

        -    h5rcreate_f (loc_id, name, ref, hdferr)
        -
        -           loc_id     IN: INTEGER (HID_T) 
        -           name       IN: CHARACTER(LEN=*)
        -           ref       OUT: TYPE (hobj_ref_t_f)
        -           hdferr    OUT: INTEGER
        -
        -

        - - -

          -
        • The ref argument specifies the buffer - in which to store the reference. -

          -

        • The loc_id and name arguments specify the name of - the referenced object. -

          -

        • In C, the ref_type argument specifies the type of the - reference. - Our example uses references to objects, H5R_OBJECT. - References to dataset regions, H5R_DATASET_REGION, - will be discussed later in this tutorial. -

          -

        • In C, the space_id argument specifies the dataspace - identifier. When references - to the objects are created, this argument should be set to -1. -

          -

        • In FORTRAN, the return value from the h5rcreate_f - call is in hdferr: 0 if successful, -1 otherwise. - In C, H5Rcreate returns a non-negative value if - successful and a negative value otherwise. -
        -

        -

      • H5Dwrite / h5dwrite_f writes a - dataset containing the references. - Notice that the H5T_STD_REF_OBJ datatype is used to - describe the dataset's memory datatype. -

        -

      -
        -
      • H5Dread / h5dread_f reads the dataset containing the references to the objects. The H5T_STD_REF_OBJ memory datatype is used to read the references into memory. A short C sketch following these remarks illustrates the full sequence.

        -

      • H5Rdereference / h5rdereference_f obtains - the object's identifier. The signature is as follows: -

        -C:

        -    hid_t H5Rdereference (hid_t dset_id, H5R_type_t ref_type, 
        -                          void *ref)
        -
        -

        -FORTRAN:

        -    h5rdereference_f (dset_id, ref, obj_id, hdferr)
        -
        -            dset_id    IN:   INTEGER (HID_T)
        -            ref        IN:   TYPE (hobj_ref_t_f)
        -            obj_id    OUT:   INTEGER (HID_T)
        -            hdferr    OUT:   INTEGER
        -
        -

        -

          -
        • The dset_id argument is the identifier of the dataset - containing the references. -

          -

        • In C, the ref_type argument specifies the reference type. -

          -

        • The ref argument is a buffer containing the reference - to be dereferenced. -

          -

        • The C function returns the identifier of the object that the - reference points to or a negative value if it is unsuccessful. - In FORTRAN, the object identifier is returned in obj_id - and the return code is returned in hdferr. -

          - In our simplified situation, we know what type of object was - stored in the dataset. When the type of the object is unknown, - H5Rget_object_type should be used to identify the type - of object the reference points to. -

        -
      - - -
      -
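      The following minimal C sketch ties the remarks above together. It is not the distributed example program: identifiers such as file_id and space_id are assumed to have been created earlier, the object names follow the DDL shown under File Contents below, and error checking is omitted.

          hobj_ref_t wbuf[4], rbuf[4];      /* buffers for the four references */
          hid_t      dsetr_id, obj_id;

          /* Dataset that will hold the object references */
          dsetr_id = H5Dcreate (file_id, "OBJECT_REFERENCES", H5T_STD_REF_OBJ,
                                space_id, H5P_DEFAULT);

          /* Create a reference to each object (space_id is -1 for object refs) */
          H5Rcreate (&wbuf[0], file_id, "/GROUP1",        H5R_OBJECT, -1);
          H5Rcreate (&wbuf[1], file_id, "/GROUP1/GROUP2", H5R_OBJECT, -1);
          H5Rcreate (&wbuf[2], file_id, "/INTEGERS",      H5R_OBJECT, -1);
          H5Rcreate (&wbuf[3], file_id, "/MYTYPE",        H5R_OBJECT, -1);

          /* Write the references, then read them back */
          H5Dwrite (dsetr_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
          H5Dread  (dsetr_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);

          /* Dereference the first reference to obtain an object identifier */
          obj_id = H5Rdereference (dsetr_id, H5R_OBJECT, &rbuf[0]);
          /* ... operate on the object, then close obj_id, dsetr_id, etc. ... */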

      File Contents

      -

      -HDF5 File Created by C Example -

      -Fig. A   REF_OBJ.h5 in DDL - -

      -HDF5 "REF_OBJ.h5" {
      -GROUP "/" {
      -   GROUP "GROUP1" {
      -      GROUP "GROUP2" {
      -      }
      -   }
      -   DATASET "INTEGERS" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 5 ) / ( 5 ) }
      -      DATA {
      -         1, 2, 3, 4, 5
      -      }
      -   }
      -   DATATYPE "MYTYPE" {
      -   }
      -   DATASET "OBJECT_REFERENCES" {
      -      DATATYPE { H5T_REFERENCE }
      -      DATASPACE { SIMPLE ( 4 ) / ( 4 ) }
      -      DATA {
      -         GROUP 0:1320, GROUP 0:2272, DATASET 0:2648, DATATYPE 0:3244
      -      }
      -   }
      -}
      -}
      -
      -
      -
      -HDF5 File Created by FORTRAN Example: -

      -Fig. B   FORTRAN.h5 in DDL - -

      -HDF5 "FORTRAN.h5" {
      -GROUP "/" {
      -   GROUP "GROUP1" {
      -      GROUP "GROUP2" {
      -      }
      -   }
      -   DATASET "INTEGERS" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 5 ) / ( 5 ) }
      -      DATA {
      -         1, 2, 3, 4, 5
      -      }
      -   }
      -   DATATYPE "MyType" {
      -   }
      -   DATASET "OBJECT_REFERENCES" {
      -      DATATYPE { H5T_REFERENCE }
      -      DATASPACE { SIMPLE ( 4 ) / ( 4 ) }
      -      DATA {
      -         GROUP 0:1344, GROUP 0:2320, DATASET 0:2696, DATATYPE 0:3292
      -      }
      -   }
      -}
      -}
      -
      -

      - -Notice how the data in the reference dataset is described. The two numbers -separated by a colon represent a unique identifier of the object. These -numbers are constant for the life of the object. - - - -


      - - - - - - - diff --git a/doc/html/Tutor/reftoreg.html b/doc/html/Tutor/reftoreg.html deleted file mode 100644 index 3745d91..0000000 --- a/doc/html/Tutor/reftoreg.html +++ /dev/null @@ -1,366 +0,0 @@ - -HDF5 Tutorial - References to Dataset Regions - - - - - - - - -[ HDF5 Tutorial Top ] -

      References to Dataset Regions -

      - -
      - - -

      Contents:

      - -
      - -

      References to Dataset Regions

      -Previously you learned about creating, reading, and writing -dataset selections. Here you will learn how to store dataset -selections in a file, and how to read them back using references -to dataset regions. -

      -A dataset region reference points to the dataset selection by storing the -relative file address of the dataset header and the global heap offset of -the referenced selection. The selection referenced is located by retrieving -the coordinates of the areas in the selection from the global heap. This -internal mechanism of storing and retrieving dataset selections is transparent -to the user. A reference to a dataset selection (a region) is constant for -the life of the dataset. - -

      Creating and Storing References to Dataset Regions

      -The following steps are involved in creating and storing references to -dataset regions: -
        - -
      1. Create a dataset in which to store the dataset regions (the selections). -

        -

      2. Create selections in the dataset(s). -The dataset(s) should already exist in the file. -

        -

      3. Create references to the selections and store them in a buffer. -

        -

      4. Write the dataset region references to the file. -

        -

      5. Close all objects. -
      - -
      -

      Reading References to Dataset Regions

      - -The following steps are involved in reading references to dataset -regions and referenced dataset regions (selections). -
        -
      1. Open and read the dataset containing references to the dataset regions. - The datatype H5T_STD_REF_DSETREG must be used during -the read operation. -

        -

      2. Use H5Rdereference / h5rdereference_f to obtain the dataset identifier from the dataset region reference that was read, OR use H5Rget_region / h5rget_region_f to obtain the dataspace identifier (including the selection) for the dataset that the reference points to.

        -

      3. Obtain information about the selection or read selected data from - the dataset. -

        -

      4. Close all objects when they are no longer needed. -
      - -

      Programming Example

      -
      -

      Description

      The example below first creates a dataset in the file. Then it creates a dataset to store references to the dataset regions (selections). The first selection is a 2 x 3 hyperslab; the second is a point selection in the same dataset. References to both selections are created, stored in a buffer, and then written to the dataset in the file.

      -After creating the dataset and references, the program reads the dataset -containing the dataset region references. -It reads data from the dereferenced dataset and displays the number of -elements and raw data. Then it reads two selections, a hyperslab selection -and a point selection. The program queries a number of points in the -hyperslab and their coordinates and displays them. Then it queries a number of -selected points and their coordinates and displays the information.
      -

      -To obtain the example, download: -

      -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. -

      - -Following is the output from the examples: -

      -Output of C Example -

      -Selected hyperslab: 
      -0 0 0 3 3 4 0 0 0 
      -0 0 0 3 4 4 0 0 0 
      -Selected points: 
      -1 0 0 0 0 0 0 0 6 
      -0 0 0 0 0 0 5 0 0 
      -
      -Output of FORTRAN Example -
      - Hyperslab selection
      -
      - 3*0,  2*3,  4,  3*0
      - 3*0,  3,  2*4,  3*0
      -
      - Point selection
      -
      - 1,  7*0,  6
      - 6*0,  5,  2*0
      -
      - - -

      Remarks

      -
        -
      • The following code creates a dataset to store references to the - dataset(s) regions (selections). Notice that the - H5T_STD_REF_DSETREG datatype is used. -

        -C: -

        -    dset1 = H5Dcreate (file_id, dsetnamer, H5T_STD_REF_DSETREG,
        -                       spacer_id, creation_prp);
        -
        -

        -FORTRAN: -

        -    CALL h5dcreate_f (file_id, dsetnamer, H5T_STD_REF_DSETREG, &
        -                     spacer_id, dset1, hdferr, creation_prp)
        -
        -

        - -

      • This program uses hyperslab and point selections. We used the dataspace identifier for the calls to H5Sselect_hyperslab / h5sselect_hyperslab_f and H5Sselect_elements / h5sselect_elements_f. The identifier was obtained when the dataset was created and describes the dataset's dataspace. We did not close it when the dataset was closed, in order to reduce the number of function calls in the example. In a real application, one should open the dataset and obtain its dataspace with the H5Dget_space / h5dget_space_f function.

        -

      • H5Rcreate / h5rcreate_f is used to create a -dataset region reference. The signature of the function is as follows: -

        -C: -

        -    herr_t H5Rcreate (void *ref, hid_t loc_id, const char *name,
        -                      H5R_type_t ref_type, hid_t space_id)
        -
        -

        -FORTRAN:   -

        -    h5rcreate_f (loc_id, name, space_id, ref, hdferr)
        -
        -        loc_id      IN: INTEGER (HID_T)
        -        name        IN: CHARACTER (LEN=*) 
        -        space_id    IN:  INTEGER (HID_T)
        -        ref        OUT: TYPE(hdset_reg_ref_t_f)
        -        hdferr     OUT: INTEGER 
        -
        -

        -

          -
        • The ref argument specifies the buffer in which - to store the reference. -

          -

        • The loc_id and name arguments specify the - referenced dataset. -

          -

        • In C, the ref_type argument specifies the reference type. - Since we are creating references to the dataset regions, - the H5R_DATASET_REGION datatype is used. -

          -

        • The space_id argument is a dataspace identifier. - This dataspace includes a selection in the referenced dataset. -

          -

        • In C, the function H5Rcreate returns a non-negative value if successful and a negative value otherwise. In FORTRAN, the return code from the h5rcreate_f subroutine is returned in hdferr: 0 if successful and -1 otherwise.
        -

        -

      • The dataset with the region references was read by - H5Dread / h5dread_f with - the H5T_STD_REF_DSETREG datatype specified. -

        -

      • The read reference can be used to obtain the dataset identifier, as we - did with the following call: -

        -C: -

        -    dset2 = H5Rdereference (dset1, H5R_DATASET_REGION, &ref_out[0]);
        -
        -

        -FORTRAN: -

        -    CALL h5rdereference_f (dset1, ref_out(1), dset2, hdferr)
        -
        -

        or to obtain spatial information (dataspace and selection) with a call to H5Rget_region / h5rget_region_f:

        -C: -

        -    dspace2 = H5Rget_region (dset1, H5R_DATASET_REGION, &ref_out[0]);
        -
        -

        -FORTRAN: -

        -    CALL H5rget_region_f (dset1, ref_out(1), dspace2, hdferr)
        -
        -

        - The reference to the dataset region has information for both the dataset - itself and its selection. In both calls, -

          -
        • the dset1 parameter is the identifier for the dataset - with the region references and -

          -

        • the ref_out parameter specifies the type of reference - stored. In this example a reference to the dataset region is stored. -
        -

        In C, H5Rdereference returns the dataset identifier and H5Rget_region returns the dataspace identifier; a negative value is returned on failure. In FORTRAN, the dataset or dataspace identifier is returned in dset2 or dspace2, and the return code for the call is returned in hdferr: 0 if successful and -1 otherwise. A C sketch following these remarks ties the calls together.

        -

      - - - -
      -
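      As with object references, a minimal C sketch (not the distributed example) may help tie the remarks together. Identifiers such as file_id, space_id (the dataspace of MATRIX), and spacer_id (the dataspace of the reference dataset) are assumed to exist, and error checking is omitted.

          hdset_reg_ref_t ref[2], ref_out[2];
          hsize_t start[2] = {0, 3}, count[2] = {2, 3};       /* hyperslab    */
          hsize_t coord[3][2] = {{0, 0}, {1, 6}, {0, 8}};     /* three points */
          hid_t   dsetr, dset2, dspace2;

          /* Dataset that will hold the two region references */
          dsetr = H5Dcreate (file_id, "REGION_REFERENCES", H5T_STD_REF_DSETREG,
                             spacer_id, H5P_DEFAULT);

          /* Reference to a hyperslab selection in MATRIX */
          H5Sselect_hyperslab (space_id, H5S_SELECT_SET, start, NULL, count, NULL);
          H5Rcreate (&ref[0], file_id, "MATRIX", H5R_DATASET_REGION, space_id);

          /* Reference to a point selection in MATRIX */
          H5Sselect_elements (space_id, H5S_SELECT_SET, 3, (const hsize_t **)coord);
          H5Rcreate (&ref[1], file_id, "MATRIX", H5R_DATASET_REGION, space_id);

          H5Dwrite (dsetr, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref);

          /* Read the references back; recover the dataset and the selection */
          H5Dread (dsetr, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_out);
          dset2   = H5Rdereference (dsetr, H5R_DATASET_REGION, &ref_out[0]);
          dspace2 = H5Rget_region  (dsetr, H5R_DATASET_REGION, &ref_out[0]);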

      File Contents

      -

      -HDF5 File Created by C Example -

      -Fig. A   REF_REG.h5 in DDL -

      -
      -HDF5 "REF_REG.h5" {
      -GROUP "/" {
      -   DATASET "MATRIX" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 2, 9 ) / ( 2, 9 ) }
      -      DATA {
      -         1, 1, 2, 3, 3, 4, 5, 5, 6,
      -         1, 2, 2, 3, 4, 4, 5, 6, 6
      -      }
      -   }
      -   DATASET "REGION_REFERENCES" {
      -      DATATYPE { H5T_REFERENCE }
      -      DATASPACE { SIMPLE ( 2 ) / ( 2 ) }
      -      DATA {
      -         DATASET 0:744 {(0,3)-(1,5)}, DATASET 0:744 {(0,0), (1,6), (0,8)}
      -      }
      -   }
      -}
      -}
      -
      -
      -HDF5 File Created by FORTRAN Example: -

      -Fig. B   FORTRAN.h5 in DDL -

      -
      -HDF5 "FORTRAN.h5" {
      -GROUP "/" {
      -   DATASET "MATRIX" {
      -      DATATYPE { H5T_STD_I32BE }
      -      DATASPACE { SIMPLE ( 9, 2 ) / ( 9, 2 ) }
      -      DATA {
      -         1, 1,
      -         1, 2,
      -         2, 2,
      -         3, 3,
      -         3, 4,
      -         4, 4,
      -         5, 5,
      -         5, 6,
      -         6, 6
      -      }
      -   }
      -   DATASET "REGION_REFERENCES" {
      -      DATATYPE { H5T_REFERENCE }
      -      DATASPACE { SIMPLE ( 2 ) / ( 2 ) }
      -      DATA {
      -         DATASET 0:744 {(3,0)-(5,1)}, DATASET 0:744 {(0,0), (6,1), (8,0)}
      -      }
      -   }
      -}
      -}
      -
      - -Notice how the raw data in the dataset with the dataset regions is displayed. -Each element of the raw data consists of a reference to the dataset -(DATASET number1:number2) and its selected region. -If the selection is a hyperslab, the corner coordinates of the hyperslab -are displayed. -For the point selection, the coordinates of each point are displayed. - - - - - -


      - - - - - - - diff --git a/doc/html/Tutor/select.html b/doc/html/Tutor/select.html deleted file mode 100644 index b4109b0..0000000 --- a/doc/html/Tutor/select.html +++ /dev/null @@ -1,309 +0,0 @@ - -HDF5 Tutorial - Hyperslab Selections - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Hyperslab Selections -

      - -
      - - -

      Contents:

      - -
      - -

      Selecting a Portion of a Dataspace

      -Hyperslabs are portions of datasets. A hyperslab selection can be a -logically contiguous collection of points in a dataspace, or it -can be a regular pattern of points or blocks in a dataspace. -You can select a hyperslab to write to or read from with the function -H5Sselect_hyperslab / h5sselect_hyperslab_f. -

      -

      Programming Example

      -
      -

      Description

      -This example creates a 5 x 6 integer array in a file called sds.h5 -(sdsf.h5 in FORTRAN). It -selects a 3 x 4 hyperslab from the dataset as follows (Dimension 0 is -offset by 1 and Dimension 1 is offset by 2): -

      5 x 6 array, with the selected 3 x 4 hyperslab marked by X
      (Dimension 0 offset by 1, Dimension 1 offset by 2):

          -  -  -  -  -  -
          -  -  X  X  X  X
          -  -  X  X  X  X
          -  -  X  X  X  X
          -  -  -  -  -  -

      -Then it reads the hyperslab from this file into a 2-dimensional plane -(size 7 x 7) of a 3-dimensional array (size 7 x 7 x 3), as -follows (with Dimension 0 offset by 3): -

      7 x 7 destination plane of the memory array, with the hyperslab written
      at Dimension 0 offset 3 (X marks the positions that receive data):

          -  -  -  -  -  -  -
          -  -  -  -  -  -  -
          -  -  -  -  -  -  -
          X  X  X  X  -  -  -
          X  X  X  X  -  -  -
          X  X  X  X  -  -  -
          -  -  -  -  -  -  -

      - -To obtain the example, download: -

      -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. -

      - - -

      Remarks

      -
        -
      • H5Sselect_hyperslab / h5sselect_hyperslab_f -selects a hyperslab region to -add to the current selected region for a specified dataspace. -

        -C: -

        -    herr_t H5Sselect_hyperslab (hid_t space_id, H5S_seloper_t operator,
        -        const hsize_t *start, const hsize_t *stride,
        -        const hsize_t *count, const hsize_t *block ) 
        -
        -

        -FORTRAN: -

        -    h5sselect_hyperslab_f (space_id, operator, start, count, &
        -                           hdferr, stride, block)
        -
        -            space_id    IN: INTEGER(HID_T) 
        -            operator    IN: INTEGER 
        -            start       IN: INTEGER(HSIZE_T), DIMENSION(*)
        -            count       IN: INTEGER(HSIZE_T), DIMENSION(*)
        -            hdferr     OUT: INTEGER
        -            stride      IN: INTEGER(HSIZE_T), DIMENSION(*), OPTIONAL
        -            block       IN: INTEGER(HSIZE_T), DIMENSION(*), OPTIONAL 
        -
        -

        -

          -
        • The parameter space_id is the dataspace identifier for the - specified dataspace. -

          -

        • The parameter operator can be set to one of the following: -
          -
          H5S_SELECT_SET (H5S_SELECT_SET_F in FORTRAN) -
          Replace the existing selection with the parameters from this call. - Overlapping blocks are not supported with this operator. - -
          H5S_SELECT_OR (H5S_SELECT_OR_F in FORTRAN) -
          Add the new selection to the existing selection. -
          - -

          -

        • The start array determines the starting coordinates of the -hyperslab to select. -

          -

        • The stride array indicates which elements along a dimension are to -be selected. -

          -

        • The count array determines how many positions to select from the - dataspace in each dimension. -

          -

        • The block array determines the size of the element block selected - by the dataspace. -

          -

        • In C, a non-negative value is returned if successful, and a negative -value otherwise. In FORTRAN, the return value is returned in hdferr: -0 if successful and -1 otherwise. -
        -

        -The start, stride, count, and block arrays must -be the same size as the rank of the dataspace. -

        -

      • The examples introduce the following call: -
        -
        H5Dget_space / h5dget_space_f: -
        Returns an identifier for a copy of the dataspace of a dataset.

        -

        -
      • The C example also introduces the following calls: -
        -
        H5Sget_simple_extent_dims: -
        Returns the size and maximum size of each dimension of a dataspace. -
        H5Sget_simple_extent_ndims: -
        Determines the dimensionality (or rank) of a dataspace. -
        -

        -The FORTRAN example does not use these calls, though they -are available as h5sget_simple_extent_dims_f and -h5sget_simple_extent_ndims_f. - -

      - - - -
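      A minimal C sketch of the two selections described above (this is not the distributed example; the identifiers dataset, dimsm = {7, 7, 3}, and data_out are assumed to exist, and error checking is omitted):

          hsize_t offset[2]     = {1, 2};        /* selection in the file */
          hsize_t count[2]      = {3, 4};
          hsize_t offset_out[3] = {3, 0, 0};     /* selection in memory   */
          hsize_t count_out[3]  = {3, 4, 1};
          hid_t   filespace, memspace;

          /* Select the 3 x 4 hyperslab in the dataset's dataspace */
          filespace = H5Dget_space (dataset);
          H5Sselect_hyperslab (filespace, H5S_SELECT_SET, offset, NULL, count, NULL);

          /* Select where the data lands in the 7 x 7 x 3 memory array */
          memspace = H5Screate_simple (3, dimsm, NULL);
          H5Sselect_hyperslab (memspace, H5S_SELECT_SET, offset_out, NULL,
                               count_out, NULL);

          /* Read the selected elements into the selected memory positions */
          H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
                   data_out);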

      - - - - - - - - - -


      - - - - - - - diff --git a/doc/html/Tutor/selectc.html b/doc/html/Tutor/selectc.html deleted file mode 100644 index 59c464c..0000000 --- a/doc/html/Tutor/selectc.html +++ /dev/null @@ -1,265 +0,0 @@ - -HDF5 Tutorial - Selecting Individual Points and Copying a Dataspace - - - - - - - - - [ HDF5 Tutorial Top ] -

      -Selecting Individual Points and Copying -a Dataspace -

      - -
      - - -

      Contents:

      - -
      - -

      Description

      -You can select independent points to read from or write to in a -dataspace by use of the H5Sselect_elements / -h5sselect_elements_f function. -

      -The H5Scopy / h5scopy_f function allows -you to make an exact copy of a dataspace. -This can reduce the number of function calls needed when -selecting a dataspace. -

      -

      Programming Example

      - -
      -

      Description

      -This example shows how to use H5Sselect_elements / -h5sselect_elements_f -to select individual points in a dataset and how to use -H5Scopy / h5scopy_f -to make a copy of a dataspace. -
      -NOTE: To download a tar file of the examples, including a Makefile, -please go to the References page. - - - -

      Remarks

      -
        -
      • H5Sselect_elements / h5sselect_elements_f -selects array elements to be -included in the selection for a dataspace: -

        -C: -

        -   herr_t H5Sselect_elements (hid_t space_id, H5S_seloper_t operator,
        -                              size_t num_elements, 
        -                              const hsize_t **coord ) 
        -
        -

        -FORTRAN: -

        -   h5sselect_elements_f (space_id, operator, num_elements, coord, hdferr)
        -
        -      space_id       IN: INTEGER(HID_T) 
        -      operator       IN: INTEGER
        -      num_elements   IN: INTEGER
        -      coord          IN: INTEGER(HSIZE_T), DIMENSION(*,*)
        -      hdferr        OUT: INTEGER
        -
        -

        -

          -
        • The space_id parameter is the dataspace identifier. -

          -

        • The operator parameter can be set to one of the following values: -
          -
          H5S_SELECT_SET (H5S_SELECT_SET_F in FORTRAN) -
          Replace the existing selection with the parameters from this call. - Overlapping blocks are not supported with this operator. -
          H5S_SELECT_OR (H5S_SELECT_OR_F in FORTRAN) -
          Add the new selection to the existing selection. -
          -

          -

        • The coord array is a two-dimensional array of size -NUMP x RANK in C -(RANK x NUMP in FORTRAN) -where NUMP is the number of selected points -and RANK is the rank of the dataset. -Note that these coordinates are 0-based in C and 1-based in FORTRAN. -

          - Consider the non-zero elements of the following array: -

          -            0  59   0  53
          -            0   0   0   0
          -            0   0   1   0    
          - In C, the coord array selecting these points would be as follows: -
          -            0   1
          -            0   3
          -            2   2            
          - While in FORTRAN, the coord array would be as follows: -
          -            1   1   3
          -            2   4   3        
          -

          -

        • In C, this function returns a non-negative value if successful and -a negative value otherwise. In FORTRAN, the value returned in hdferr -indicates whether it was successful (0) or not (-1). -
        -

        -

      • H5Scopy / h5scopy_f creates an exact copy of a dataspace: -

        -C: -                  -

        -   hid_t H5Scopy (hid_t space_id) 
        -
        -FORTRAN:   -
        -   h5scopy_f (space_id, new_space_id, hdferr)  
        -
        -      space_id       IN: INTEGER(HID_T) 
        -      new_space_id  OUT: INTEGER(HID_T)
        -      hdferr        OUT: INTEGER 
        -
        -

        -

          -
        • The space_id parameter is the dataspace identifier to copy. -

          -

        • In C, the identifier to the dataspace's copy is returned if the -function is successful and a negative value is returned if not. In -FORTRAN, the new dataspace identifier is returned in new_space_id -and the return value is returned in hdferr ( 0 if successful and --1 if not). -
        -
      - -

      - - -
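      A minimal C sketch of the two calls, using the three points from the illustration above (this is not the distributed example; dset1 and dset2 are assumed to be open datasets and error checking is omitted):

          hsize_t dims[2]     = {3, 4};                    /* file dataset  */
          hsize_t mdims[1]    = {3};                       /* values buffer */
          hsize_t coord[3][2] = {{0, 1}, {0, 3}, {2, 2}};  /* points (C)    */
          int     values[3]   = {59, 53, 1};
          hid_t   space1, space2, mspace;

          space1 = H5Screate_simple (2, dims, NULL);
          space2 = H5Scopy (space1);            /* exact copy of the dataspace */
          mspace = H5Screate_simple (1, mdims, NULL);

          /* Select the same three elements in both file dataspaces */
          H5Sselect_elements (space1, H5S_SELECT_SET, 3, (const hsize_t **)coord);
          H5Sselect_elements (space2, H5S_SELECT_SET, 3, (const hsize_t **)coord);

          /* Write only the selected elements of each dataset */
          H5Dwrite (dset1, H5T_NATIVE_INT, mspace, space1, H5P_DEFAULT, values);
          H5Dwrite (dset2, H5T_NATIVE_INT, mspace, space2, H5P_DEFAULT, values);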

      File Contents

      - -Following is the DDL for copy1.h5 and copy2.h5, as viewed with -the following commands:
      -             -h5dump copy1.h5
      -             -h5dump copy2.h5 - -

      -


      -C:

      -Fig. S.1a   copy1.h5 in DDL -

      -   HDF5 "copy1.h5" {
      -   GROUP "/" {
      -      DATASET "Copy1" {
      -         DATATYPE { H5T_STD_I32BE }
      -         DATASPACE { SIMPLE ( 3, 4 ) / ( 3, 4 ) }
      -         DATA {
      -            0, 59, 0, 53,
      -            0, 0, 0, 0,
      -            0, 0, 0, 0
      -         }
      -      }
      -   }
      -   }
      -
      -Fig. S.1b   copy2.h5 in DDL -
      -   HDF5 "copy2.h5" {
      -   GROUP "/" {
      -      DATASET "Copy2" {
      -         DATATYPE { H5T_STD_I32BE }
      -         DATASPACE { SIMPLE ( 3, 4 ) / ( 3, 4 ) }
      -         DATA {
      -            1, 59, 1, 53,
      -            1, 1, 1, 1,
      -            1, 1, 1, 1
      -         }
      -      }
      -   }
      -   }
      -
      -
      -FORTRAN:

      -Fig. S.2a   copy1.h5 in DDL -

      -   HDF5 "copy1.h5" {
      -   GROUP "/" {
      -      DATASET "Copy1" {
      -         DATATYPE { H5T_STD_I32BE }
      -         DATASPACE { SIMPLE ( 4, 3 ) / ( 4, 3 ) }
      -         DATA {
      -            0, 0, 0,
      -            53, 0, 0,
      -            0, 0, 0,
      -            59, 0, 0
      -         }
      -      }
      -   }
      -   }
      -
      -Fig. S.2b   copy2.h5 in DDL -
      -   HDF5 "copy2.h5" {
      -   GROUP "/" {
      -      DATASET "Copy2" {
      -         DATATYPE { H5T_STD_I32BE }
      -         DATASPACE { SIMPLE ( 4, 3 ) / ( 4, 3 ) }
      -         DATA {
      -            1, 1, 1,
      -            53, 1, 1,
      -            1, 1, 1,
      -            59, 1, 1
      -         }
      -      }
      -   }
      -   }
      -
      - - - - -


      - - - - - - - diff --git a/doc/html/Tutor/software.html b/doc/html/Tutor/software.html deleted file mode 100644 index 074bfc8..0000000 --- a/doc/html/Tutor/software.html +++ /dev/null @@ -1,85 +0,0 @@ - -HDF5 Tutorial - Obtaining HDF5 Software - - - - - - - - [ HDF5 Tutorial Top ] -

      -Obtaining HDF5 Software -

      - -
      - - -If you will be compiling in: -
      -
      C: -
      You will need the HDF5 library. We provide pre-compiled binaries -for the platforms on which we tested at: -
              -ftp://ftp.ncsa.uiuc.edu/HDF/HDF5/current/bin/ -

      If you use the pre-compiled binaries, you must also obtain the GZIP library: the binaries were built with GZIP support but do not include the library itself. We provide the GZIP library for the platforms on which we tested at:
              -ftp://ftp.ncsa.uiuc.edu/HDF/gzip/ -

      -You can build the HDF5 library yourself, if need be. The source code -can be obtained from: -
              -ftp://ftp.ncsa.uiuc.edu/HDF/HDF5/current/src/ -

      -For further information regarding HDF5, check the HDF5 home page: -
              -http://hdf.ncsa.uiuc.edu/HDF5/ -

      -

      FORTRAN 90: -
      With HDF5-1.4.0, support for Fortran 90 is included as part of -the installation of the HDF5 library. The pre-compiled binaries include -the Fortran library. If you need to build from source, download the -HDF5-1.4.0 source code and compile it with the --enable-fortran flag. -Read the instructions in the -RELEASE.txt -file for further details. - -
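      For example, building from source with Fortran support enabled might look like the following (the archive name and steps are illustrative; see RELEASE.txt for the authoritative instructions):

          gunzip < hdf5-1.4.0.tar.gz | tar xf -
          cd hdf5-1.4.0
          ./configure --enable-fortran
          make && make install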

      -

      Java: -
      You will need the JHI5 code. Go to the -Java HDF5 web page -for information on the Java-HDF5 software. The Java Tutorial examples -are included with this tutorial: -
              - ./examples/java/ -
      - - - - -


      - - - - - diff --git a/doc/html/Tutor/title.html b/doc/html/Tutor/title.html deleted file mode 100644 index e045353..0000000 --- a/doc/html/Tutor/title.html +++ /dev/null @@ -1,105 +0,0 @@ - -HDF5 Tutorial - - - - - - - - -

      -What is HDF5? - HDF5 Tutorial -

      - - - -
      -
      - - [ Home ] - [ Index ] - [ Products ] - [ Newsletters ] - [ Documentation ] -
      - -
      -
      -
      - -

      NOTE: This tutorial does NOT include the software needed to compile the examples. You will need to obtain it first.

      Contents:

      -

      Introductory Topics

      -
        -
      1. Introduction -
      2. HDF5 File Organization -
      3. The HDF5 API -
      4. Creating an HDF5 File -
      5. Creating a Dataset -
      6. Reading from or Writing to a Dataset -
      7. Creating an Attribute -
      8. Creating a Group -
      9. Creating Groups Using Absolute and - Relative Names -
      10. Creating Datasets in Groups -
      - -

      Advanced Topics

      - -

      -HDF5 Utilities - h5ls/h5dump
      -Glossary
      -References
      -Example Programs from this Tutorial
      -

    - - - - -


    - - - - - - - diff --git a/doc/html/Tutor/util.html b/doc/html/Tutor/util.html deleted file mode 100644 index 7397029..0000000 --- a/doc/html/Tutor/util.html +++ /dev/null @@ -1,85 +0,0 @@ - -HDF5 Tutorial - Utilities (h5dump, h5ls) - - - - - - - - - [ HDF5 Tutorial Top ] -

    -Utilities (h5dump, h5ls) -

    - -
    The h5dump and h5ls utilities can be used to examine the contents of an HDF5 file.

    - -

    h5dump

    -
    -h5dump [-h] [-bb] [-header] [-a <names>] [-d <names>] [-g <names>]
    -       [-l <names>] [-t <names>] <file>
    -
    -  -h            Print information on this command.
    -  -bb           Display the content of the boot block. The default is not to display.
    -  -header       Display header only; no data is displayed.
    -  -a <names>    Display the specified attribute(s).
    -  -d <names>    Display the specified dataset(s).
    -  -g <names>    Display the specified group(s) and all the members.
    -  -l <names>    Displays the value(s) of the specified soft link(s).
    -  -t <names>    Display the specified named data type(s).
    -  
    -  <names> is one or more appropriate object names.
    -
    -
    -
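    For example, to display only a single dataset from a file (the dataset and file names here are illustrative, taken from the region-reference example earlier in this tutorial):

        h5dump -d MATRIX REF_REG.h5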

    h5ls

    -
    -h5ls [OPTIONS] FILE [OBJECTS...]
    -
    -   OPTIONS
    -      -h, -?, --help   Print a usage message and exit
    -      -d, --dump       Print the values of datasets
    -      -f, --full       Print full path names instead of base names
    -      -l, --label      Label members of compound datasets
    -      -r, --recursive  List all groups recursively, avoiding cycles
    -      -s, --string     Print 1-byte integer datasets as ASCII
    -      -wN, --width=N   Set the number of columns of output
    -      -v, --verbose    Generate more verbose output
    -      -V, --version    Print version number and exit
    -   FILE
    -      The file name may include a printf(3C) integer format such as
    -      "%05d" to open a file family.
    -   OBJECTS
    -      The names of zero or more objects about which information should be
    -      displayed.  If a group is mentioned then information about each of its
    -      members is displayed.  If no object names are specified then
    -      information about all of the objects in the root group is displayed.
    -
    - - -
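    An illustrative h5ls invocation that lists all groups recursively and also prints dataset values (file name is just an example):

        h5ls -r -d REF_REG.h5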


    - - - - - diff --git a/doc/html/Version.html b/doc/html/Version.html deleted file mode 100644 index d465d04..0000000 --- a/doc/html/Version.html +++ /dev/null @@ -1,137 +0,0 @@ - - - - Version Numbers - - - -

    Version Numbers

    - -

    1. Introduction

    - -

    The HDF5 version number is a set of three integer values - written as either hdf5-1.2.3 or hdf5 version - 1.2 release 3. - -

    The 5 is part of the library name and will only - change if the entire file format and library are redesigned - similar in scope to the changes between HDF4 and HDF5. - -

    The 1 is the major version number and - changes when there is an extensive change to the file format or - library API. Such a change will likely require files to be - translated and applications to be modified. This number is not - expected to change frequently. - -

    The 2 is the minor version number and is - incremented by each public release that presents new features. - Even numbers are reserved for stable public versions of the - library while odd numbers are reserved for development - versions. See the diagram below for examples. - -

    The 3 is the release number. For public - versions of the library, the release number is incremented each - time a bug is fixed and the fix is made available to the public. - For development versions, the release number is incremented more - often (perhaps almost daily). - -

    2. Abbreviated Versions

    - -

    It's often convenient to drop the release number when referring - to a version of the library, like saying version 1.2 of HDF5. - The release number can be any value in this case. - -

    3. Special Versions

    - -

    Version 1.0.0 was released for alpha testing the first week of March, 1998. The development version number was incremented to 1.0.1 and remained constant until the last week of April, when the release number started to increase and development versions were made available to people outside the core HDF5 development team.

    Version 1.0.23 was released mid-July as a second alpha - version. - -

    Version 1.1.0 will be the first official beta release but the - 1.1 branch will also serve as a development branch since we're - not concerned about providing bug fixes separate from normal - development for the beta version. - -

    After the beta release we rolled back the version number so the - first release is version 1.0 and development will continue on - version 1.1. We felt that an initial version of 1.0 was more - important than continuing to increment the pre-release version - numbers. - -

    4. Public versus Development

    - -

    The motivation for separate public and development versions is - that the public version will receive only bug fixes while the - development version will receive new features. This also allows - us to release bug fixes expediently without waiting for the - development version to reach a stable state. - -

    Eventually, the development version will near completion and a - new development branch will fork while the original one enters a - feature freeze state. When the original development branch is - ready for release the minor version number will be incremented - to an even value. - -

    -

    - Version Example -
    Fig 1: Version Example -
    - -

    5. Version Support from the Library

    - -

    The library provides a set of macros and functions to query and - check version numbers. - -

    -
    H5_VERS_MAJOR -
    H5_VERS_MINOR -
    H5_VERS_RELEASE -
    These preprocessor constants are defined in the public - include file and determine the version of the include files. - -

    -
    herr_t H5get_libversion (unsigned *majnum, unsigned - *minnum, unsigned *relnum) -
    This function returns through its arguments the version - numbers for the library to which the application is linked. - -

    -
    void H5check(void) -
    This is a macro that verifies that the version number of the - HDF5 include file used to compile the application matches the - version number of the library to which the application is - linked. This check occurs automatically when the first HDF5 - file is created or opened and is important because a mismatch - between the include files and the library is likely to result - in corrupted data and/or segmentation faults. If a mismatch - is detected the library issues an error message on the - standard error stream and aborts with a core dump. - -

    -
    herr_t H5check_version (unsigned majnum, - unsigned minnum, unsigned relnum) -
    This function is called by the H5check() macro - with the include file version constants. The function - compares its arguments to the result returned by - H5get_libversion() and if a mismatch is detected prints - an error message on the standard error stream and aborts. -
    - -
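    A small, self-contained C sketch of these calls (illustrative only, not part of the formal documentation):

        #include <stdio.h>
        #include "hdf5.h"

        int main(void)
        {
            unsigned majnum, minnum, relnum;

            /* Abort if the include files and the linked library do not match */
            H5check();

            /* Query the version of the library the application is linked with */
            H5get_libversion(&majnum, &minnum, &relnum);
            printf("library: %u.%u.%u   headers: %d.%d.%d\n",
                   majnum, minnum, relnum,
                   H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE);
            return 0;
        }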
    -
    HDF Help Desk
    -
    - - - -Last modified: Fri Oct 30 10:32:50 EST 1998 - - - - diff --git a/doc/html/chunk1.gif b/doc/html/chunk1.gif deleted file mode 100644 index 0260818..0000000 Binary files a/doc/html/chunk1.gif and /dev/null differ diff --git a/doc/html/chunk1.obj b/doc/html/chunk1.obj deleted file mode 100644 index 5936b0c..0000000 --- a/doc/html/chunk1.obj +++ /dev/null @@ -1,52 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,16,1,9,1,1,0,0,3,0,1,1,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,384,384,5,2,1,29,0,0,0,0,0,'2',[ -]). -poly('black',2,[ - 128,64,128,384],0,2,1,30,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 192,64,192,384],0,2,1,31,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 256,64,256,384],0,2,1,32,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 320,64,320,384],0,2,1,33,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 64,128,384,128],0,2,1,34,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 64,192,384,192],0,2,1,35,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 64,256,384,256],0,2,1,36,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 64,320,384,320],0,2,1,37,0,4,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',128,448,192,512,5,2,1,56,0,0,0,0,0,'2',[ -]). -text('black',448,208,'Courier',0,17,2,1,0,1,84,28,61,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Entire array", - "5000 x 5000"]). -text('black',256,464,'Courier',0,17,2,1,0,1,84,28,63,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Single Chunk", - "1000 x 1000"]). -box('black',48,48,512,528,0,1,1,71,0,0,0,0,0,'1',[ -]). diff --git a/doc/html/compat.html b/doc/html/compat.html deleted file mode 100644 index fd46ca4..0000000 --- a/doc/html/compat.html +++ /dev/null @@ -1,271 +0,0 @@ - - - - Backward/Forward Compatability - - - -

    Backward/Forward Compatibility

    - -

    The HDF5 development must proceed in such a manner as to - satisfy the following conditions: - -

      -
    A. HDF5 applications can produce data that HDF5 applications can read and write, and HDF4 applications can produce data that HDF4 applications can read and write. The situation that demands this condition is obvious.

    B. HDF5 applications are able to produce data that HDF4 applications can read, and HDF4 applications can subsequently modify the file subject to certain constraints depending on the implementation. This condition is for the temporary situation where a consumer has neither been relinked with a new HDF4 API built on top of the HDF5 API nor recompiled with the HDF5 API.

    C. HDF5 applications can read existing HDF4 files and subsequently modify the file subject to certain constraints depending on the implementation. This condition is for the temporary situation in which the producer has neither been relinked with a new HDF4 API built on top of the HDF5 API nor recompiled with the HDF5 API, or the permanent situation of HDF5 consumers reading archived HDF4 files.

      There's at least one invariant: new object features introduced in the HDF5 file format (like 2-d arrays of structs) might be impossible to "translate" to a format that an old HDF4 application can understand, either because the HDF4 file format or the HDF4 API has no mechanism to describe the object.

      What follows is one possible implementation based on how - Condition B was solved in the AIO/PDB world. It also attempts - to satisfy these goals: - -

        -
      1. The main HDF5 library contains as little extra baggage as possible, by either relying on external programs to take care of compatibility issues or by incorporating the logic of such programs as optional modules in the HDF5 library. Conditions B and C are separate programs/modules.

      2. No extra baggage not only means the library proper is small, but also means it can be implemented (rather than migrated from HDF4 source) from the ground up with minimal regard for HDF4, thus keeping the logic straightforward.

      3. Compatibility issues are handled behind the scenes when necessary (and possible), but can be carried out explicitly during things like data migration.
      - -
      -

      Wrappers

      - -

      The proposed implementation uses wrappers to handle compatibility issues. A Format-X file is wrapped in a Format-Y file by creating a Format-Y skeleton that replicates the Format-X meta data. The Format-Y skeleton points to the raw data stored in Format-X without moving the raw data. The restriction is that the raw data storage methods in Format-Y are a superset of the raw data storage methods in Format-X (otherwise the raw data must be copied to Format-Y). We're assuming that meta data is small with respect to the entire file.

      The wrapper can be a separate file that has pointers into the - first file or it can be contained within the first file. If - contained in a single file, the file can appear as a Format-Y - file or simultaneously a Format-Y and Format-X file. - -

      The Format-X meta data can be thought of as the original wrapper around the raw data, and Format-Y is a second wrapper around the same data. The wrappers are independent of one another; modifying the meta data in one wrapper causes the other to become out of date. Modification of raw data doesn't invalidate either view as long as the meta data that describes its storage isn't modified. For instance, an array element can change values if storage is already allocated for the element, but if storage isn't allocated then the meta data describing the storage must change, invalidating all wrappers but one.

      It's perfectly legal to modify the meta data of one wrapper - without modifying the meta data in the other wrapper(s). The - illegal part is accessing the raw data through a wrapper which - is out of date. - -

      If raw data is wrapped by more than one internal wrapper - (internal means that the wrapper is in the same file as - the raw data) then access to that file must assume that - unreferenced parts of that file contain meta data for another - wrapper and cannot be reclaimed as free memory. - -


      -

      Implementation of Condition B

      - -

      Since this is a temporary situation which can't be - automatically detected by the HDF5 library, we must rely - on the application to notify the HDF5 library whether or not it - must satisfy Condition B. (Even if we don't rely on the - application, at some point someone is going to remove the - Condition B constraint from the library.) So the module that - handles Condition B is conditionally compiled and then enabled - on a per-file basis. - -

      If the application desires to produce an HDF4 file (determined - by arguments to H5Fopen), and the Condition B - module is compiled into the library, then H5Fclose - calls the module to traverse the HDF5 wrapper and generate an - additional internal or external HDF4 wrapper (wrapper specifics - are described below). If Condition B is implemented as a module - then it can benefit from the metadata already cached by the main - library. - -

      An internal HDF4 wrapper would be used if the HDF5 file is - writable and the user doesn't mind that the HDF5 file is - modified. An external wrapper would be used if the file isn't - writable or if the user wants the data file to be primarily HDF5 - but a few applications need an HDF4 view of the data. - -

      Modifying through the HDF5 library an HDF5 file that has - internal HDF4 wrapper should invalidate the HDF4 wrapper (and - optionally regenerate it when H5Fclose is - called). The HDF5 library must understand how wrappers work, but - not necessarily anything about the HDF4 file format. - -

      Modifying through the HDF5 library an HDF5 file that has an external HDF4 wrapper will cause the HDF4 wrapper to become out of date (but it can possibly be regenerated during H5Fclose). Note: Perhaps the next release of the HDF4 library should ensure that the HDF4 wrapper file has a more recent modification time than the raw data file (the HDF5 file) to which it points(?)

      Modifying through the HDF4 library an HDF5 file that has an internal or external HDF4 wrapper will cause the HDF5 wrapper to become out of date. However, there is no way for the old HDF4 library to notify the HDF5 wrapper that it's out of date. Therefore the HDF5 library must be able to detect when the HDF5 wrapper is out of date and be able to fix it. If the HDF4 wrapper is complete then the easy way is to ignore the original HDF5 wrapper and generate a new one from the HDF4 wrapper. The other approach is to compare the HDF4 and HDF5 wrappers and assume that if they differ HDF4 is the right one, that if HDF4 omits data it is because HDF4 is a partial wrapper (rather than assuming HDF4 deleted the data), and that if HDF4 has new data then the new meta data is copied to the HDF5 wrapper. On the other hand, perhaps we don't need to allow these situations (modifying an HDF5 file with the old HDF4 library and then accessing it with the HDF5 library is either disallowed or causes HDF5 objects that can't be described by HDF4 to be lost).

      To convert an HDF5 file to an HDF4 file on demand, one simply opens the file with the HDF4 flag and closes it. This is also how AIO implemented backward compatibility with PDB in its file format.


      -

      Implementation of Condition C

      - -

      This condition must be satisfied for all time because there - will always be archived HDF4 files. If a pure HDF4 file (that - is, one without HDF5 meta data) is opened with an HDF5 library, - the H5Fopen builds an internal or external HDF5 - wrapper and then accesses the raw data through that wrapper. If - the HDF5 library modifies the file then the HDF4 wrapper becomes - out of date. However, since the HDF5 library hasn't been - released, we can at least implement it to disable and/or reclaim - the HDF4 wrapper. - -

      If an external and temporary HDF5 wrapper is desired, the - wrapper is created through the cache like all other HDF5 files. - The data appears on disk only if a particular cached datum is - preempted. Instead of calling H5Fclose on the HDF5 - wrapper file we call H5Fabort which immediately - releases all file resources without updating the file, and then - we unlink the file from Unix. - -


      -

      What do wrappers look like?

      - -

      External wrappers are quite obvious: they contain only things - from the format specs for the wrapper and nothing from the - format specs of the format which they wrap. - -

      An internal HDF4 wrapper is added to an HDF5 file in such a way - that the file appears to be both an HDF4 file and an HDF5 - file. HDF4 requires an HDF4 file header at file offset zero. If - a user block is present then we just move the user block down a - bit (and truncate it) and insert the minimum HDF4 signature. - The HDF4 dd list and any other data it needs are - appended to the end of the file and the HDF5 signature uses the - logical file length field to determine the beginning of the - trailing part of the wrapper. - -

      -

      The resulting file layout, from the beginning of the file, is:

        1. HDF4 minimal file header. Its main job is to point to the dd list
           at the end of the file.
        2. User-defined block, which is truncated by the size of the HDF4
           file header so that the HDF5 super block file address doesn't
           change.
        3. The HDF5 super block and data, unmodified by adding the HDF4
           wrapper.
        4. The main part of the HDF4 wrapper. The dd list will have entries
           for all parts of the file so hdpack(?) doesn't (re)move anything.
      - -

      When such a file is opened by the HDF5 library for - modification it shifts the user block back down to address zero - and fills with zeros, then truncates the file at the end of the - HDF5 data or adds the trailing HDF4 wrapper to the free - list. This prevents HDF4 applications from reading the file with - an out of date wrapper. - -

      If there is no user block then we have a problem. The HDF5 - super block must be moved to make room for the HDF4 file header. - But moving just the super block causes problems because all file - addresses stored in the file are relative to the super block - address. The only option is to shift the entire file contents - by 512 bytes to open up a user block (too bad we don't have - hooks into the Unix i-node stuff so we could shift the entire - file contents by the size of a file system page without ever - performing I/O on the file :-) - -

      Is it possible to place an HDF5 wrapper in an HDF4 file? I - don't know enough about the HDF4 format, but I would suspect it - might be possible to open a hole at file address 512 (and - possibly before) by moving some things to the end of the file - to make room for the HDF5 signature. The remainder of the HDF5 - wrapper goes at the end of the file and entries are added to the - HDF4 dd list to mark the location(s) of the HDF5 - wrapper. - -


      -

      Other Thoughts

      - -

      Conversion programs that copy an entire HDF4 file to a separate, - self-contained HDF5 file and vice versa might be useful. - - - - -


      -
      Robb Matzke
      - - -Last modified: Wed Oct 8 12:34:42 EST 1997 - - - diff --git a/doc/html/cpplus/CppInterfaces.html b/doc/html/cpplus/CppInterfaces.html deleted file mode 100644 index f8f37f2..0000000 --- a/doc/html/cpplus/CppInterfaces.html +++ /dev/null @@ -1,1437 +0,0 @@ - - -HDF5 C++ API Interfaces - - - - - - - - - -
      -
      -                           HDF5 C++ Interfaces
      -                           ===================
      -
      -// HDF5 dataset and attribute have some common characteristics, so the
      -// term abstract dataset is used to name the element that can represent 
      -// both objects, dataset and attribute.
      -//
      -// Class AbstractDs is an abstract base class, from which Attribute and 
      -// DataSet inherit.  It provides the services that are common to both 
      -// Attribute and DataSet.  It also inherits from H5Object and passes down 
      -// the services that H5Object provides.
      -class AbstractDs : public H5Object
      -
      -	// Gets the dataspace of this abstract dataset - pure virtual
      -	virtual DataSpace getSpace() const = 0;
      -
      -        // Gets the class of the datatype that is used by this abstract 
      -	// dataset        
      -	H5T_class_t getTypeClass() const;
      -
      -	// Gets a copy of the datatype that this abstract dataset uses.  
      -	// Note that this datatype is a generic one and can only be accessed 
      -	// via generic member functions, i.e., member functions belong to 
      -	// DataType.  To get specific datatype, i.e. EnumType, FloatType, 
      -	// etc..., use the specific functions, that follow, instead.
      -	DataType getDataType() const;
      -
      -        // Gets a copy of the specific datatype of this abstract dataset
      -        EnumType getEnumType() const;
      -        CompType getCompType() const;
      -        IntType getIntType() const;
      -        FloatType getFloatType() const;
      -        StrType getStrType() const;
      -
      -	// Copy constructor
      -	AbstractDs( const AbstractDs& original );
      -
      -	virtual ~AbstractDs();
      -
      -// end of class AbstractDs
      -
      -// Atomic datatype can be an integer, float, string, or predefined datatype.
      -//
      -// Class AtomType is a base class, from which IntType, FloatType, StrType, 
      -// and PredType inherit.  It provides the services that are common to these
      -// subclasses.  It also inherits from DataType and passes down the  
      -// services that are common to all the datatypes.
      -class AtomType : public DataType
      -
      -	// Sets the total size for an atomic datatype. 
      -	void setSize( size_t size ) const;
      -
      -	// Returns the byte order of an atomic datatype. 
      -	H5T_order_t getOrder( string& order_string ) const;
      -
      -	// Sets the byte ordering of an atomic datatype. 
      -	void setOrder( H5T_order_t order ) const;
      -
      -	// Returns the precision of an atomic datatype. 
      -	size_t getPrecision() const;
      -
      -	// Sets the precision of an atomic datatype. 
      -	void setPrecision( size_t precision ) const;
      -
      -	// Gets the bit offset of the first significant bit. 
      -	int getOffset() const;
      -
      -	// Sets the bit offset of the first significant bit. 
      -	void setOffset( size_t offset ) const;
      -
      -	// Copy constructor 
      -	AtomType( const AtomType& original );
      -
      -	virtual ~AtomType();
      -
      -// end of class AtomType
      -
      -
      -// An attribute is an abstract dataset because it has some characteristics
      -// that a dataset also has, but not all.
      -//
      -// Class Attribute inherits from AbstractDs and provides accesses to an 
      -// attribute.
      -class Attribute : public AbstractDs
      -
      -	// Writes data to this attribute.
      -	void write(const DataType& mem_type, void *buf ) const;
      -
      -	// Reads data from this attribute.
      -	void read( const DataType& mem_type, void *buf ) const;
      -
      -	// Gets a copy of the dataspace for this attribute.
      -	virtual DataSpace getSpace() const;
      -
      -	// Gets the name of this attribute.
      -	string getName( size_t buf_size ) const;
      -
      -	// An attribute doesn't have the ability to iterate, simply because
      -	// it doesn't have any attributes associated with it.  Thus, the
      -	// implementation of this member which is inherited from H5Object
      -	// is overwritten to do nothing here.
      -	int iterateAttrs() const;
      -
      -        // Creates a copy of an existing attribute using the attribute id
      -        Attribute( const hid_t attr_id );
      -
      -	// Copy constructor
      -	Attribute( const Attribute& original );
      -
      -	virtual ~Attribute();
      -
      -
      -// CommonFG is a protocol class.  Its existence is simply to provide the
      -// common services that are provided by H5File and Group.  The file or 
      -// group in the context of this class is referred to as 'location'.
      -class CommonFG
      -	// Creates a new group at this location.
      -	Group createGroup( const string& name, size_t size_hint = 0 ) const;
      -	Group createGroup( const char* name, size_t size_hint = 0 ) const;
      -
      -	// Opens an existing group in a location.
      -	Group openGroup( const string& name ) const;
      -	Group openGroup( const char* name ) const;
      -
      -	// Creates a new dataset at this location.
      -	DataSet createDataSet( const string& name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist = DSetCreatPropList::DEFAULT ) const;
      -	DataSet createDataSet( const char* name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist = DSetCreatPropList::DEFAULT ) const;
      -
      -	// Opens an existing dataset at this location.
      -	DataSet openDataSet( const string& name ) const;
      -	DataSet openDataSet( const char* name ) const;
      -
      -	// Creates a link of the specified type from new_name to current_name;
      -	// both names are interpreted relative to this location.
      -	void link( H5G_link_t link_type, const string& curr_name, const string& new_name ) const;
      -	void link( H5G_link_t link_type, const char* curr_name, const char* new_name ) const;
      -
      -	// Removes the specified name at this location.
      -	void unlink( const string& name ) const;
      -	void unlink( const char* name ) const;
      -
      -	// Renames an HDF5 object at this location.
      -	void move( const string& src, const string& dst ) const;
      -	void move( const char* src, const char* dst ) const;
      -
      -	// Returns information about an HDF5 object, given by its name, at this location.
      -	void getObjinfo( const string& name, hbool_t follow_link, H5G_stat_t& statbuf ) const;
      -	void getObjinfo( const char* name, hbool_t follow_link, H5G_stat_t& statbuf ) const;
      -
      -	// Returns the name of the HDF5 object that the symbolic link points to.
      -	string getLinkval( const string& name, size_t size ) const;
      -	string getLinkval( const char* name, size_t size ) const;
      -
      -	// Sets the comment for the HDF5 object specified by its name.
      -	void setComment( const string& name, const string& comment ) const;
      -	void setComment( const char* name, const char* comment ) const;
      -
      -	// Retrieves comment for the HDF5 object specified by its name.
      -	string getComment( const string& name, size_t bufsize ) const;
      -	string getComment( const char* name, size_t bufsize ) const;
      -
      -	// Mounts the file 'child' onto this location.
      -	void mount( const string& name, H5File& child, PropList& plist ) const;
      -	void mount( const char* name, H5File& child, PropList& plist) const;
      -
      -	// Unmounts the file named 'name' from this location.
      -	void unmount( const string& name ) const;
      -	void unmount( const char* name ) const;
      -
      -	// Iterates over the elements of this location - not implemented in
      -	// C++ style yet
      -	int iterateElems( const string& name, int *idx, H5G_iterate_t op, void *op_data );
      -	int iterateElems( const char* name, int *idx, H5G_iterate_t op, void *op_data );
      -
      -	// Opens a generic named datatype at this location
      -	DataType openDataType( const string& name ) const;
      -	DataType openDataType( const char* name ) const;
      -
      -	// Opens a named enumeration datatype at this location
      -	EnumType openEnumType( const string& name ) const;
      -	EnumType openEnumType( const char* name ) const;
      -
      -	// Opens a named compound datatype at this location
      -	CompType openCompType( const string& name ) const;
      -	CompType openCompType( const char* name ) const;
      -
      -	// Opens a named integer datatype at this location
      -	IntType openIntType( const string& name ) const;
      -	IntType openIntType( const char* name ) const;
      -
      -	// Opens a named floating-point datatype at this location
      -	FloatType openFloatType( const string& name ) const;
      -	FloatType openFloatType( const char* name ) const;
      -
      -	// Opens a named string datatype at this location
      -	StrType openStrType( const string& name ) const;
      -	StrType openStrType( const char* name ) const;
      -
      -	// For H5File and Group to throw appropriate exception - pure virtual
      -	virtual void throwException() const = 0;
      -
      -	// Get id of the location, either group or file - pure virtual 
      -	virtual hid_t getLocId() const = 0; 
      -
      -	CommonFG();
      -	virtual ~CommonFG();
      -
      -// end of CommonFG declaration
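
For illustration, a minimal sketch of the CommonFG services as reached through H5File; the file, group, and dataset names are hypothetical, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void build_layout()
    {
        H5File file( "example.h5", H5F_ACC_TRUNC );          // create or truncate a file
        Group  grp  = file.createGroup( "/data" );           // new group at the root
        hsize_t dims[1] = { 100 };
        DataSet dset = grp.createDataSet( "vector",
                                          PredType::NATIVE_DOUBLE,
                                          DataSpace( 1, dims ) );
        file.move( "/data/vector", "/data/v0" );             // rename the new dataset
    }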
      -
      -
      -// Class CompType inherits from DataType and provides accesses to a compound 
      -// datatype.
      -class CompType : public DataType
      -
      -	// Creates a new compound datatype, given the type's size.
      -	CompType( size_t size );
      -
      -	// Creates a compound datatype using an existing id.
      -	CompType( const hid_t existing_id );
      -
      -	// Gets the compound datatype of the specified dataset.
      -	CompType( const DataSet& dataset );
      -
      -	// Returns the number of members in this compound datatype. 
      -	int getNmembers() const;
      -
      -	// Returns the name of a member of this compound datatype. 
      -	string getMemberName( unsigned member_num ) const;
      -
      -	// Returns the offset of a member of this compound datatype. 
      -	size_t getMemberOffset( unsigned memb_no ) const;
      -
      -	// Returns the dimensionality of the specified member of this compound datatype. 
      -	int getMemberDims( int member_num, size_t* dims, int* perm ) const;
      -
-	// Returns the type class of the specified member of this compound
-	// datatype.  This lets the user know which datatype class to use
-	// when creating another datatype of the same class.
      -	H5T_class_t getMemberClass( unsigned member_num ) const;
      -
      -	// Returns the generic datatype of the specified member in 
      -	// this compound datatype.
      -	DataType getMemberDataType( int member_num ) const;
      -
      -	// Returns the enumeration datatype of the specified member in 
      -	// this compound datatype.
      -	EnumType getMemberEnumType( int member_num ) const;
      -
      -	// Returns the compound datatype of the specified member in 
      -	// this compound datatype.
      -	CompType getMemberCompType( int member_num ) const;
      -
      -	// Returns the integer datatype of the specified member in 
      -	// this compound datatype.
      -	IntType getMemberIntType( int member_num ) const;
      -
      -	// Returns the floating-point datatype of the specified member in 
      -	// this compound datatype.
      -	FloatType getMemberFloatType( int member_num ) const;
      -
      -	// Returns the string datatype of the specified member in 
      -	// this compound datatype.
      -	StrType getMemberStrType( int member_num ) const;
      -
      -	// Adds a new member to this compound datatype.
      -	void insertMember( const string name, size_t offset, const DataType& new_member ) const;
      -
      -	// Recursively removes padding from within this compound datatype. 
      -	void pack() const;
      -
      -	// Default constructor
      -	CompType();
      -
      -	// Copy constructor
      -	CompType( const CompType& original );
      -
      -	virtual ~CompType();
      -
      -// end of class CompType
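
For illustration, a minimal sketch of building a compound datatype with insertMember; the Particle struct is hypothetical, HOFFSET is the C library's member-offset macro, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    struct Particle { double x; double y; int id; };          // hypothetical record layout

    CompType make_particle_type()
    {
        CompType ptype( sizeof(Particle) );
        ptype.insertMember( "x",  HOFFSET(Particle, x),  PredType::NATIVE_DOUBLE );
        ptype.insertMember( "y",  HOFFSET(Particle, y),  PredType::NATIVE_DOUBLE );
        ptype.insertMember( "id", HOFFSET(Particle, id), PredType::NATIVE_INT );
        ptype.pack();                     // remove any alignment padding from the stored type
        return ptype;
    }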
      -
      -
      -// Class DataSet inherits from AbstractDs and provides accesses to a dataset.
      -class DataSet : public AbstractDs
      -
      -	// Gets the dataspace of this dataset.
      -	virtual DataSpace getSpace() const;
      -
      -	// Gets the creation property list of this dataset.
      -	DSetCreatPropList getCreatePlist() const;
      -
      -	// Gets the storage size of this dataset.
      -	hsize_t getStorageSize() const;
      -
      -	// Reads the data of this dataset and stores it in the provided buffer.
-	// The memory and file dataspaces and the transfer property list
      -	// can be defaults.
      -	void read( void* buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT ) const;
      -
      -	// Writes the buffered data to this dataset.
-	// The memory and file dataspaces and the transfer property list
      -	// can be defaults.
      -	void write( const void* buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT ) const;
      -
      -	// Extends the dataset with unlimited dimension.
      -	void extend( const hsize_t* size ) const;
      -
      -	// Default constructor
      -	DataSet();
      -
      -	// Copy constructor
      -	DataSet( const DataSet& original );
      -
      -	virtual ~DataSet();
      -
      -// end of class DataSet
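
For illustration, a minimal sketch of whole-extent I/O with the read and write members above; the dataset is assumed to have been created elsewhere with a 100-element double extent, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void roundtrip( DataSet& dset )                   // hypothetical 100-element double dataset
    {
        double out[100], in[100];
        for ( int i = 0; i < 100; i++ )
            out[i] = i * 0.5;
        dset.write( out, PredType::NATIVE_DOUBLE );   // dataspaces and transfer list left at defaults
        dset.read( in, PredType::NATIVE_DOUBLE );     // read the values back into memory
    }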
      -
      -
      -// Class DataSpace provides accesses to the dataspace.
      -class DataSpace : public IdComponent
      -
      -	// Default DataSpace objects
      -	static const DataSpace ALL;
      -
      -	// Creates a dataspace object given the space type.
      -	DataSpace( H5S_class_t type );
      -
      -	// Creates a simple dataspace.
      -	DataSpace( int rank, const hsize_t * dims, const hsize_t * maxdims = NULL);
      -
-	// Makes a copy of an existing dataspace.
      -	void copy( const DataSpace& like_space );
      -
      -	// Determines if this dataspace is a simple one.
      -	bool isSimple () const;
      -
      -	// Sets the offset of this simple dataspace.
      -	void offsetSimple ( const hssize_t* offset ) const;
      -
      -	// Retrieves dataspace dimension size and maximum size.
      -	int getSimpleExtentDims ( hsize_t *dims, hsize_t *maxdims = NULL ) const;
      -
      -	// Gets the dimensionality of this dataspace.
      -	int getSimpleExtentNdims () const;
      -
      -	// Gets the number of elements in this dataspace.
      -	hssize_t getSimpleExtentNpoints () const;
      -
      -	// Gets the current class of this dataspace.
      -	H5S_class_t getSimpleExtentType () const;
      -
      -	// Copies the extent of this dataspace.
      -	void extentCopy ( DataSpace& dest_space ) const;
      -
      -	// Sets or resets the size of this dataspace.
      -	void setExtentSimple( int rank, const hsize_t *current_size, const hsize_t *maximum_size = NULL ) const;
      -
      -	// Removes the extent from this dataspace.
      -	void setExtentNone () const;
      -
      -	// Gets the number of elements in this dataspace selection.
      -	hssize_t getSelectNpoints () const;
      -
      -	// Get number of hyperslab blocks.
      -	hssize_t getSelectHyperNblocks () const;
      -
      -	// Gets the list of hyperslab blocks currently selected.
      -	void getSelectHyperBlocklist( hsize_t startblock, hsize_t numblocks, hsize_t *buf ) const;
      -
      -	// Gets the number of element points in the current selection.
      -	hssize_t getSelectElemNpoints () const;
      -
      -	// Retrieves the list of element points currently selected.
      -	void getSelectElemPointlist ( hsize_t startpoint, hsize_t numpoints, hsize_t *buf ) const;
      -
      -	// Gets the bounding box containing the current selection.
      -	void getSelectBounds ( hsize_t* start, hsize_t* end ) const;
      -
      -	// Selects array elements to be included in the selection for 
      -	// this dataspace.
      -	void selectElements ( H5S_seloper_t op, const size_t num_elements, const hsize_t* coord[ ] ) const;
      -
      -	// Selects the entire dataspace.
      -	void selectAll () const;
      -
      -	// Resets the selection region to include no elements.
      -	void selectNone () const;
      -
      -	// Verifies that the selection is within the extent of the dataspace.
      -	bool selectValid () const;
      -
      -	// Selects a hyperslab region to add to the current selected region.
      -	void selectHyperslab( H5S_seloper_t op, const hsize_t *count, const hsize_t *start, const hsize_t *stride = NULL, const hsize_t *block = NULL ) const;
      -
      -	// Default constructor
      -	DataSpace();
      -
      -	// Create a dataspace object from a dataspace identifier
      -	DataSpace( const hid_t space_id );
      -
      -	// Copy constructor
      -	DataSpace( const DataSpace& original );
      -
      -	virtual ~DataSpace();
      -// end of class DataSpace
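
For illustration, a minimal sketch of creating a simple dataspace and selecting a hyperslab in it; the 10x10 extent and 5x5 block are arbitrary, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void select_block()
    {
        hsize_t dims[2]  = { 10, 10 };
        DataSpace space( 2, dims );                    // simple 10 x 10 dataspace
        hsize_t count[2] = { 5, 5 };                   // 5 x 5 block
        hsize_t start[2] = { 2, 2 };                   // anchored at row 2, column 2
        space.selectHyperslab( H5S_SELECT_SET, count, start );
        hssize_t npoints = space.getSelectNpoints();   // 25 elements selected
        bool inside      = space.selectValid();        // selection lies within the extent
    }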
      -
      -
      -// HDF5 datatype can be an atom datatype, a compound datatype, or an 
      -// enumeration datatype.  A datatype is itself a kind of HDF5 object.
      -//
      -// Class DataType provides accesses to a generic HDF5 datatype.  It has 
      -// characteristics which AtomType, CompType, and EnumType inherit.  It also 
      -// inherits from H5Object and passes down the services to its subclasses.
      -class DataType : public H5Object
      -
      -	// Creates a datatype given its class and size.
      -	DataType( const H5T_class_t type_class, size_t size );
      -
      -	// Copies an existing datatype to this datatype instance.
      -	void copy( const DataType& like_type );
      -
      -	// Returns the datatype class identifier of this datatype. 
      -	H5T_class_t getClass() const;
      -
      -	// Commits a transient datatype to a file; this datatype becomes 
      -	// a named datatype which can be accessed from the location.
      -	void commit( H5Object& loc, const string& name ) const;
      -	void commit( H5Object& loc, const char* name ) const;
      -
      -	// Determines whether this datatype is a named datatype or 
      -	// a transient datatype. 
      -	bool committed() const;
      -
      -        // Finds a conversion function that can handle the conversion 
      -        // of this datatype to the given datatype, dest.
      -	H5T_conv_t find( const DataType& dest, H5T_cdata_t **pcdata ) const;
      -
      -	// Converts data from this datatype into the specified datatype, dest. 
      -	void convert( const DataType& dest, size_t nelmts, void *buf, void *background, PropList& plist ) const;
      -
      -	// Sets the overflow handler to a specified function. 
      -	void setOverflow(H5T_overflow_t func) const;
      -
      -	// Returns a pointer to the current global overflow function. 
      -	H5T_overflow_t getOverflow(void) const;
      -
      -	// Locks a datatype. 
      -	void lock() const;
      -
      -	// Returns the size of this datatype. 
      -	size_t getSize() const;
      -
      -	// Returns the base datatype from which a datatype is derived. 
      -	// Not implemented yet
      -	DataType getSuper() const;
      -
      -	// Registers a conversion function. 
      -	void registerFunc(H5T_pers_t pers, const string& name, const DataType& dest, H5T_conv_t func ) const;
      -	void registerFunc(H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func ) const;
      -
      -	// Removes a conversion function from all conversion paths. 
      -	void unregister( H5T_pers_t pers, const string& name, const DataType& dest, H5T_conv_t func ) const;
      -	void unregister( H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func ) const;
      -
      -	// Tags an opaque datatype. 
      -	void setTag( const string& tag ) const;
      -	void setTag( const char* tag ) const;
      -
      -	// Gets the tag associated with an opaque datatype. 
      -	string getTag() const;
      -
      -	// Creates a DataType using an existing id - this datatype is
      -	// not a predefined type
      -	DataType( const hid_t type_id, bool predtype = false );
      -
      -	// Default constructor
      -	DataType();
      -
      -	// Copy constructor
      -	DataType( const DataType& original );
      -
      -	virtual ~DataType();
      -
      -// end of class DataType
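
For illustration, a minimal sketch of committing a transient datatype so that it becomes a named datatype; the group is hypothetical and already open, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void commit_time_type( Group& grp )               // hypothetical, already-open group
    {
        IntType millitime( PredType::STD_I32LE );     // transient 32-bit little-endian type
        millitime.commit( grp, "millitime" );         // now a named datatype in the file
        bool named = millitime.committed();           // true after the commit
    }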
      -
      -
      -// Class DSetCreatPropList provides accesses to a dataset creation 
      -// property list.
      -class DSetCreatPropList : public PropList
      -
      -	// Default DSetCreatPropList object
      -	static const DSetCreatPropList DEFAULT;
      -
-	// Copies a dataset creation property list using the assignment operator.
      -	DSetCreatPropList& operator=( const DSetCreatPropList& rhs );
      -
      -	// Sets the type of storage used to store the raw data for the 
      -	// dataset that uses this property list.
      -	void setLayout( hid_t plist, H5D_layout_t layout ) const;
      -
      -	// Gets the layout of the raw data storage of the data that uses this
      -	// property list.
      -	H5D_layout_t getLayout() const;
      -
      -	// Sets the size of the chunks used to store a chunked layout dataset.
      -	void setChunk( int ndims, const hsize_t* dim ) const;
      -
      -	// Retrieves the size of the chunks used to store a chunked layout dataset.
      -	int getChunk( int max_ndims, hsize_t* dim ) const;
      -
      -	// Sets compression method and compression level
      -	void setDeflate( int level ) const;
      -
      -	// Sets a dataset fill value.
      -	void setFillValue( DataType& fvalue_type, const void* value ) const;
      -
      -	// Retrieves a dataset fill value.
      -	void getFillValue( DataType& fvalue_type, void* value ) const;
      -
      -	// Adds a filter to the filter pipeline
      -	void setFilter( H5Z_filter_t filter, unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[] ) const;
      -
      -	// Returns the number of filters in the pipeline. 
      -	int getNfilters() const;
      -
      -	// Returns information about a filter in a pipeline.
      -	H5Z_filter_t getFilter( int filter_number, unsigned int& flags, size_t& cd_nelmts, unsigned int* cd_values, size_t namelen, char name[] ) const;
      -
      -	// Adds an external file to the list of external files.
      -	void setExternal( const char* name, off_t offset, hsize_t size ) const;
      -
      -	// Returns the number of external files for a dataset. 
      -	int getExternalCount() const;
      -
      -	// Returns information about an external file
      -	void getExternal( unsigned idx, size_t name_size, char* name, off_t& offset, hsize_t& size ) const;
      -
      -	// Creates a copy of an existing dataset creation property list 
      -	// using the property list id
      -	DSetCreatPropList( const hid_t plist_id );
      -
      -	// Default constructor
      -	DSetCreatPropList();
      -
      -	// Copy constructor
      -	DSetCreatPropList( const DSetCreatPropList& original );
      -
      -	virtual ~DSetCreatPropList();
      -
      -// end of class DSetCreatPropList
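
For illustration, a minimal sketch of a creation property list that requests a chunked, deflate-compressed dataset; the chunk size, compression level, and dataset name are arbitrary, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    DataSet create_compressed_image( H5File& file )
    {
        DSetCreatPropList cparms;
        hsize_t chunk_dims[2] = { 64, 64 };
        cparms.setChunk( 2, chunk_dims );              // chunked layout, 64 x 64 chunks
        cparms.setDeflate( 6 );                        // gzip compression, level 6
        hsize_t dims[2] = { 1024, 1024 };
        return file.createDataSet( "image", PredType::NATIVE_FLOAT,
                                   DataSpace( 2, dims ), cparms );
    }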
      -
      -
      -// Class DSetMemXferPropList provides accesses to a dataset memory and 
      -// transfer property list.
      -class DSetMemXferPropList : public PropList
      -
      -	// Default object for dataset memory and transfer property list
      -	static const DSetMemXferPropList DEFAULT;
      -
-	// Copies a dataset memory and transfer property list using the
-	// assignment operator.
      -	DSetMemXferPropList& operator=( const DSetMemXferPropList& rhs );
      -
      -	// Sets type conversion and background buffers
      -	void setBuffer( size_t size, void* tconv, void* bkg ) const;
      -
      -	// Reads buffer settings
      -	size_t getBuffer( void** tconv, void** bkg ) const;
      -
      -	// Sets the dataset transfer property list status to TRUE or FALSE
      -	void setPreserve( bool status ) const;
      -
      -	// Checks status of the dataset transfer property list
      -	bool getPreserve() const;
      -
      -	// Indicates whether to cache hyperslab blocks during I/O
      -	void setHyperCache( bool cache, unsigned limit = 0 ) const;
      -
      -	// Returns information regarding the caching of hyperslab blocks during I/O
      -	void getHyperCache( bool& cache, unsigned& limit ) const;
      -
      -	// Sets B-tree split ratios for a dataset transfer property list 
      -	void setBtreeRatios( double left, double middle, double right ) const;
      -
      -	// Gets B-tree split ratios for a dataset transfer property list
      -	void getBtreeRatios( double& left, double& middle, double& right ) const;
      -
      -	// Sets the memory manager for variable-length datatype 
      -	// allocation in H5Dread and H5Dvlen_reclaim
      -	void setVlenMemManager( H5MM_allocate_t alloc, void* alloc_info, 
      -				H5MM_free_t free, void* free_info ) const;
      -
      -	// alloc and free are set to NULL, indicating that system 
      -	// malloc and free are to be used
      -	void setVlenMemManager() const;
      -
      -	// Gets the memory manager for variable-length datatype 
-	// allocation in H5Dread and H5Dvlen_reclaim
      -	void getVlenMemManager( H5MM_allocate_t& alloc, void** alloc_info, 
      -				H5MM_free_t& free, void** free_info ) const;
      -
      -	// Sets the transfer mode - parallel mode, not currently supported
      -	//void setXfer( H5D_transfer_t data_xfer_mode = H5D_XFER_INDEPENDENT ) const;
      -
      -	// Gets the transfer mode - parallel mode, not currently supported
      -	//H5D_transfer_t getXfer() const;
      -
      -	// Creates a copy of an existing dataset memory and transfer 
      -	// property list using the property list id
-	DSetMemXferPropList( const hid_t plist_id );
      -
      -	// Default constructor
      -	DSetMemXferPropList();
      -
      -	// Copy constructor
      -	DSetMemXferPropList( const DSetMemXferPropList& original );
      -
      -	// Default destructor
      -	virtual ~DSetMemXferPropList();
      -
      -// end of class DSetMemXferPropList
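
For illustration, a minimal sketch of a transfer property list that enlarges the type-conversion buffer and preserves unselected compound fields during I/O; the 1 MB size is arbitrary, the NULL buffer pointers are assumed to leave allocation to the library, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void read_with_xfer( DataSet& dset, const DataType& memtype, void* buf )
    {
        DSetMemXferPropList xfer;
        xfer.setBuffer( 1024 * 1024, NULL, NULL );    // 1 MB conversion buffer, library-allocated
        xfer.setPreserve( true );                     // do not clobber unselected compound fields
        dset.read( buf, memtype, DataSpace::ALL, DataSpace::ALL, xfer );
    }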
      -
      -
      -class EnumType : public DataType
      -
      -	// Creates an empty enumeration datatype based on a native signed 
      -	// integer type, whose size is given by size.
      -	EnumType( size_t size );
      -
      -	// Gets the enum datatype of the specified dataset
      -	EnumType( const DataSet& dataset );  // H5Dget_type
      -
      -	// Creates a new enum datatype based on an integer datatype
      -	EnumType( const IntType& data_type );  // H5Tenum_create
      -
      -	// Inserts a new member to this enumeration type. 
      -	void insert( const string& name, void *value ) const;
      -	void insert( const char* name, void *value ) const;
      -
      -	// Returns the symbol name corresponding to a specified member 
      -	// of this enumeration datatype. 
      -	string nameOf( void *value, size_t size ) const;
      -
      -	// Returns the value corresponding to a specified member of this 
      -	// enumeration datatype. 
      -	void valueOf( const string& name, void *value ) const;
      -	void valueOf( const char* name, void *value ) const;
      -
      -	// Returns the value of an enumeration datatype member
      -	void getMemberValue( unsigned memb_no, void *value ) const;
      -
      -	// Default constructor
      -	EnumType();
      -
      -	// Creates an enumeration datatype using an existing id
      -	EnumType( const hid_t existing_id );
      -
      -	// Copy constructor
      -	EnumType( const EnumType& original );
      -
      -	virtual ~EnumType();
      -// end of class EnumType
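
For illustration, a minimal sketch of building a small enumeration datatype with insert and querying it with nameOf; the member names, values, and 16-character name buffer size are arbitrary, the API's string type is assumed to be std::string, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    #include <string>
    using namespace H5;

    void make_color_enum()
    {
        EnumType color( sizeof(int) );                // based on a native signed int
        int v;
        v = 0;  color.insert( "RED",   &v );
        v = 1;  color.insert( "GREEN", &v );
        v = 2;  color.insert( "BLUE",  &v );
        v = 1;
        std::string name = color.nameOf( &v, 16 );    // "GREEN"
    }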
      -
      -
      -class Exception
      -
      -	// Creates an exception with a detailed message
      -	Exception( const string& message );
      -
      -	Exception( const char* message);
      -
      -	// Returns the character string that describes an error specified by
      -	// a major error number.
      -	string getMajorString( H5E_major_t major_num ) const;
      -
      -	// Returns the character string that describes an error specified by
      -	// a minor error number.
      -	string getMinorString( H5E_minor_t minor_num ) const;
      -
      -	// Returns the detailed message set at the time the exception is thrown
      -	string getDetailMesg() const;
      -
      -	// Turns on the automatic error printing.
      -	void setAutoPrint( H5E_auto_t func, 
      -				void* client_data ) const;
      -
      -	// Turns off the automatic error printing.
      -	static void dontPrint();
      -
      -	// Retrieves the current settings for the automatic error stack 
      -	// traversal function and its data.
      -	void getAutoPrint( H5E_auto_t& func, 
      -				void** client_data ) const;
      -
      -	// Clears the error stack for the current thread.
      -	void clearErrorStack() const;
      -
      -	// Walks the error stack for the current thread, calling the 
      -	// specified function.
      -	void walkErrorStack( H5E_direction_t direction, 
      -				H5E_walk_t func, void* client_data ) const;
      -
      -	// Default error stack traversal callback function that prints 
      -	// error messages to the specified output stream.
      -	void walkDefErrorStack( int n, H5E_error_t& err_desc,
      -				void* client_data ) const;
      -
      -	// Prints the error stack in a default manner.
      -	//void printError() const;
      -	void printError( FILE* stream = NULL ) const;
      -
      -	// Creates an exception with no message
      -	Exception();
      -
      -	// copy constructor
      -	Exception( const Exception& original );
      -
      -// end of class Exception
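
For illustration, a minimal sketch of the error-handling pattern these classes support: turn off automatic error printing, then catch the specific exception subclasses (declared just below) thrown by the file and dataset calls. The file and dataset names are hypothetical, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    #include <iostream>
    using namespace H5;

    void open_carefully()
    {
        try
        {
            Exception::dontPrint();                      // report errors through exceptions only
            H5File file( "results.h5", H5F_ACC_RDONLY );
            DataSet dset = file.openDataSet( "dset1" );
        }
        catch( FileIException& e )                       // file or dataset could not be opened
        {
            std::cerr << e.getDetailMesg() << std::endl;
        }
        catch( Exception& e )                            // any other HDF5 C++ exception
        {
            std::cerr << e.getDetailMesg() << std::endl;
        }
    }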
      -
      -
      -// Class FileIException inherits from Exception to provide exception 
      -// handling for H5File.
      -class FileIException : public Exception
      -	FileIException();
      -	FileIException( string message );
      -// end of class FileIException
      -
      -
      -// Class GroupIException inherits from Exception to provide exception 
      -// handling for Group.
      -class GroupIException : public Exception
      -	GroupIException();
      -	GroupIException( string message );
      -// end of class GroupIException
      -
      -
      -// Class DataSpaceIException inherits from Exception to provide exception 
      -// handling for DataSpace.
      -class DataSpaceIException : public Exception
      -	DataSpaceIException();
      -	DataSpaceIException( string message );
      -// end of class DataSpaceIException
      -
      -
      -// Class DataTypeIException inherits from Exception to provide exception 
      -// handling for DataType.
      -class DataTypeIException : public Exception
      -	DataTypeIException();
      -	DataTypeIException( string message );
      -// end of class DataTypeIException
      -
      -
      -// Class PropListIException inherits from Exception to provide exception 
      -// handling for PropList.
      -class PropListIException : public Exception
      -	PropListIException();
      -	PropListIException( string message );
      -// end of class PropListIException
      -
      -
      -// Class DataSetIException inherits from Exception to provide exception 
      -// handling for DataSet.
      -class DataSetIException : public Exception
      -	DataSetIException();
      -	DataSetIException( string message );
      -// end of class DataSetIException
      -
      -
      -// Class AttributeIException inherits from Exception to provide exception 
      -// handling for Attribute.
      -class AttributeIException : public Exception
      -	AttributeIException();
      -	AttributeIException( string message );
      -// end of class AttributeIException
      -
      -
      -// Class LibraryIException inherits from Exception to provide exception 
      -// handling for H5Library.
      -class LibraryIException : public Exception
      -	LibraryIException();
      -	LibraryIException( string message );
      -// end of class LibraryIException
      -
      -
      -// Class IdComponentException inherits from Exception to provide exception 
      -// handling for IdComponent.
      -class IdComponentException : public Exception
      -	IdComponentException();
      -	IdComponentException( string message );
      -// end of class IdComponentException
      -
      -
      -// Class FileAccPropList provides accesses to a file access property list.
      -class FileAccPropList : public PropList
      -
      -	// Default file access property list object 
      -	static const FileAccPropList DEFAULT;
      -
-	// Copies a file access property list using the assignment operator.
      -	FileAccPropList& operator=( const FileAccPropList& rhs );
      -
      -	// Sets alignment properties of this file access property list.
      -	void setAlignment( hsize_t threshold = 1, hsize_t alignment = 1 ) const;
      -
      -	// Retrieves the current settings for alignment properties from
      -	// this file access property list.
      -	void getAlignment( hsize_t& threshold, hsize_t& alignment ) const;
      -
      -	// Sets the meta data cache and raw data chunk cache parameters.
      -	void setCache( int mdc_nelmts, size_t rdcc_nelmts, size_t rdcc_nbytes, double rdcc_w0 ) const;
      -
      -	// Retrieves maximum sizes of data caches and the preemption 
      -	// policy value.
      -	void getCache( int& mdc_nelmts, size_t& rdcc_nelmts, size_t& rdcc_nbytes, double& rdcc_w0 ) const;
      -
      -	// Sets garbage collecting references flag.
      -	void setGcReferences( unsigned gc_ref = 0 ) const;
      -
      -	// Returns garbage collecting references setting.
      -	unsigned getGcReferences() const;
      -
      -	// Creates a copy of an existing file access property list
      -	// using the property list id.
      -	FileAccPropList (const hid_t plist_id);
      -
      -	// Default constructor
      -	FileAccPropList();
      -
      -	// Copy constructor
      -	FileAccPropList( const FileAccPropList& original );
      -
      -	// Default destructor
      -	virtual ~FileAccPropList();
      -
      -// end of class FileAccPropList
      -
      -
      -// Class FileCreatPropList provides accesses to a file creation property list.
      -class FileCreatPropList : public PropList
      -
      -	// Default file creation property list object
      -	static const FileCreatPropList DEFAULT;
      -	
-	// Copies a file creation property list using the assignment operator.
      -	FileCreatPropList& operator=( const FileCreatPropList& rhs );
      -
      -	// Retrieves version information for various parts of a file.
      -	void getVersion( unsigned& boot, unsigned& freelist, unsigned& stab, unsigned& shhdr ) const;
      -
      -	// Sets the userblock size field of a file creation property list.
      -	void setUserblock( hsize_t size ) const;
      -
      -	// Gets the size of a user block in this file creation property list.
      -	hsize_t getUserblock() const;
      -
-	// Sets the byte sizes of file addresses and object sizes.
      -	void setSizes( size_t sizeof_addr = 4, size_t sizeof_size = 4 ) const;
      -
-	// Retrieves the byte sizes of file addresses and object sizes stored
-	// in a file, according to this file creation property list.
      -	void getSizes( size_t& sizeof_addr, size_t& sizeof_size ) const;
      -
      -	// Sets the size of parameters used to control the symbol table nodes.
      -	void setSymk( unsigned int_nodes_k, unsigned leaf_nodes_k ) const;
      -
      -	// Retrieves the size of the symbol table B-tree 1/2 rank and the
      -	// symbol table leaf node 1/2 size.
      -	void getSymk( unsigned& int_nodes_k, unsigned& leaf_nodes_k ) const;
      -
-	// Sets the size of the parameter used to control the B-trees for
      -	// indexing chunked datasets.
      -	void setIstorek( unsigned ik ) const;
      -
      -	// Returns the 1/2 rank of an indexed storage B-tree.
      -	unsigned getIstorek() const;
      -
      -	// Creates a copy of an existing file create property list
      -	// using the property list id.
      -	FileCreatPropList (const hid_t plist_id);
      -
      -	// Default constructor
      -	FileCreatPropList();
      -
      -	// Copy constructor
      -	FileCreatPropList( const FileCreatPropList& original );
      -
      -	// Default destructor
      -	virtual ~FileCreatPropList();
      -
      -// end of class FileCreatPropList
      -
      -// Class H5File provides accesses to an HDF5 file.  It uses the services
-// provided by CommonFG besides inheriting the HDF5 id management from the
      -// IdComponent class.
      -class H5File : public IdComponent, public CommonFG
      -
      -	// Creates or opens an HDF5 file.  The file creation and access 
      -	// property lists can be default.
      -	H5File( const string& name, unsigned int flags, const FileCreatPropList& create_plist = FileCreatPropList::DEFAULT, const FileAccPropList& access_plist = FileAccPropList::DEFAULT );
      -	H5File( const char* name, unsigned int flags, const FileCreatPropList& create_plist = FileCreatPropList::DEFAULT, const FileAccPropList& access_plist = FileAccPropList::DEFAULT );
      -
      -	// Throw file exception - used by CommonFG to specifically throw
      -	// FileIException.
      -	virtual void throwException() const;
      -
      -	// Determines if a file, specified by its name, is in HDF5 format.
      -	static bool isHdf5(const string& name );
      -	static bool isHdf5(const char* name );
      -
      -	// Reopens this file.
      -	void reopen();
      -
      -	// Gets the creation property list of this file.
      -	FileCreatPropList getCreatePlist() const;
      -
      -	// Gets the access property list of this file.
      -	FileAccPropList getAccessPlist() const;
      -
      -	// Copy constructor
      -	H5File(const H5File& original );
      -
      -	virtual ~H5File();
      -
      -// end of class H5File
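
For illustration, a minimal sketch of a typical H5File sequence: create a file, inspect its creation property list, and confirm it is in HDF5 format; the file name is hypothetical, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void create_and_check()
    {
        H5File file( "results.h5", H5F_ACC_TRUNC );      // create (or truncate) the file
        FileCreatPropList cplist = file.getCreatePlist();
        hsize_t ublock = cplist.getUserblock();          // 0 unless a userblock was requested
        bool ok = H5File::isHdf5( "results.h5" );        // true for any HDF5 file
    }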
      -
      -
      -// Class FloatType inherits from AtomType and provides accesses to a 
      -// floating-point datatype.
      -class FloatType : public AtomType
      -
      -        // Creates a floating-point type using a predefined type.
      -        FloatType( const PredType& pred_type );
      -
      -	// Gets the floating-point datatype of the specified dataset.
      -	FloatType( const DataSet& dataset );
      -
      -	// Retrieves floating point datatype bit field information. 
      -	void getFields( size_t& spos, size_t& epos, size_t& esize, size_t& mpos, size_t& msize ) const;
      -
      -	// Sets locations and sizes of floating point bit fields. 
      -	void setFields( size_t spos, size_t epos, size_t esize, size_t mpos, size_t msize ) const;
      -
      -	// Retrieves the exponent bias of a floating-point type. 
      -	size_t getEbias() const;
      -
      -	// Sets the exponent bias of a floating-point type. 
      -	void setEbias( size_t ebias ) const;
      -
      -	// Retrieves mantissa normalization of a floating-point datatype. 
      -	H5T_norm_t getNorm( string& norm_string ) const;
      -
      -	// Sets the mantissa normalization of a floating-point datatype. 
      -	void setNorm( H5T_norm_t norm ) const;
      -
      -	// Retrieves the internal padding type for unused bits in 
      -	// floating-point datatypes. 
      -	H5T_pad_t getInpad( string& pad_string ) const;
      -	
      -	// Fills unused internal floating point bits. 
      -	void setInpad( H5T_pad_t inpad ) const;
      -
      -	// Default constructor
      -	FloatType();
      -
      -	// Creates a floating-point datatype using an existing id.
      -	FloatType( const hid_t existing_id );
      -
      -	// Copy constructor
      -	FloatType( const FloatType& original );
      -
      -	virtual ~FloatType();
      -
      -// end of class FloatType
      -
      -
-// Class Group provides accesses to an HDF5 group.  Like H5File, it uses the 
      -// services provided by CommonFG.  This class also inherits from H5Object.
      -class Group : public H5Object, public CommonFG
      -   public:
      -
      -        // Throw group exception - used by CommonFG to specifically throw
      -	// GroupIException.
      -        virtual void throwException() const;
      -
      -	// Default constructor
      -	Group();
      -
      -	// Copy constructor
      -	Group( const Group& original );
      -
      -	virtual ~Group();
      -
      -// end of class Group
      -
      -// Class IdComponent provides a mechanism to handle reference counting 
      -// for an identifier of any HDF5 object.
      -class IdComponent
      -	// Sets the identifier of this object to a new value.
      -	void setId( hid_t new_id );
      -
      -	// Creates an object to hold an HDF5 identifier.
      -	IdComponent( const hid_t h5_id );
      -
      -	// Gets the value of the current HDF5 object id which is held
      -	// by this IdComponent object.
      -	hid_t getId () const;
      -
      -	// Increment reference counter.
      -	void incRefCount();
      -
      -	// Decrement reference counter.
      -	void decRefCount();
      -
      -	// Get the reference counter to this identifier.
      -	int getCounter();
      -
-	// Decrements the reference counter, then determines if there are
-	// no more references to this object.
      -	bool noReference();
      -
-	// Resets this object by deleting the reference counter of its old id.
      -	void reset();
      -
      -	// Copy constructor
      -	IdComponent( const IdComponent& original );
      -
      -	// Destructor
      -	virtual ~IdComponent();
      -
      -}; // end class IdComponent
      -
      -
      -// Class IntType inherits from AtomType and provides accesses to 
      -// integer datatypes.
      -class IntType : public AtomType
      -
-	// Creates an integer type using a predefined type.
      -	IntType( const PredType& pred_type );
      -
      -	// Gets the integer datatype of the specified dataset.
      -	IntType( const DataSet& dataset );
      -
      -	// Retrieves the sign type for an integer type.
      -	H5T_sign_t getSign() const;
      -
-	// Sets the sign property for an integer type. 
      -	void setSign( H5T_sign_t sign ) const;
      -
      -	// Default constructor
      -	IntType();
      -
-	// Creates an integer datatype using an existing id.
      -	IntType( const hid_t existing_id );
      -
      -	// Copy constructor
      -	IntType( const IntType& original );
      -
      -	virtual ~IntType();
      -
      -// end of class IntType
      -
      -
      -// Class H5Library provides accesses to the HDF5 library.  All of its
      -// member functions are static.
      -class H5Library
      -
      -	// Initializes the HDF5 library. 
      -	static void open(); 
      -
      -	// Flushes all data to disk, closes files, and cleans up memory. 
      -	static void close(); 
      -
-	// Instructs the library not to install the atexit cleanup routine.
      -	static void dontAtExit(); 
      -
      -	// Returns the HDF library release number. 
      -	static void getLibVersion( unsigned& majnum, unsigned& minnum, unsigned& relnum ); 
      -
      -	// Verifies that the arguments match the version numbers compiled
      -	// into the library
      -	static void checkVersion( unsigned majnum, unsigned minnum, unsigned relnum ); 
      -
      -// end of class H5Library
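
For illustration, a minimal sketch of the H5Library calls; checkVersion simply returns when the supplied numbers match the library the application was compiled against, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void report_version()
    {
        unsigned maj, min, rel;
        H5Library::getLibVersion( maj, min, rel );    // release numbers of the linked library
        H5Library::checkVersion( maj, min, rel );     // trivially succeeds with its own numbers
    }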
      -
      -
      -// An HDF5 object can be a group, dataset, attribute, or named datatype.
      -//
      -// Class H5Object provides the services that are typical to an HDF5 object
      -// so Group, DataSet, Attribute, and DataType can use them.  It also 
      -// inherits the HDF5 id management from the class IdComponent.
      -class H5Object : public IdComponent
      -
      -	// Flushes all buffers associated with this HDF5 object to disk.
      -	void flush( H5F_scope_t scope ) const;
      -
      -	// Creates an attribute for a group, dataset, or named datatype.
-	// PropList is currently not used; it should always be the default.
      -	Attribute createAttribute( const char* name, const DataType& type, const DataSpace& space, const PropList& create_plist = PropList::DEFAULT ) const;
      -	Attribute createAttribute( const string& name, const DataType& type, const DataSpace& space, const PropList& create_plist = PropList::DEFAULT ) const;
      -
      -	// Opens an attribute that belongs to this object, given the 
      -	// attribute name.
      -	Attribute openAttribute( const string& name ) const;
      -	Attribute openAttribute( const char* name ) const;
      -
      -	// Opens an attribute that belongs to this object, given the
      -	// attribute index.
      -	Attribute openAttribute( const unsigned int idx ) const;
      -
-	// Iterates a user-supplied function over the attributes of this HDF5 object.
      -	int iterateAttrs( attr_operator_t user_op, unsigned* idx = NULL, void* op_data = NULL );
      -
      -	// Determines the number of attributes attached to this HDF5 object.
      -	int getNumAttrs() const;
      -
      -	// Removes an attribute from this HDF5 object, given the attribute 
      -	// name.
      -	void removeAttr( const string& name ) const;
      -	void removeAttr( const char* name ) const;
      -
      -	// Copy constructor
      -	H5Object( const H5Object& original );
      -
      -	virtual ~H5Object();
      -
      -// end of class H5Object
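
For illustration, a minimal sketch of attaching an attribute to an object through the createAttribute member above; the attribute name and values are hypothetical, the create_plist argument is left at its default as the comment above advises, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    void add_scale( DataSet& dset )                   // any H5Object would do: Group, DataSet, ...
    {
        hsize_t adim[1] = { 3 };
        DataSpace aspace( 1, adim );
        Attribute attr = dset.createAttribute( "scale", PredType::NATIVE_DOUBLE, aspace );
        double scale[3] = { 1.0, 0.5, 0.25 };
        attr.write( PredType::NATIVE_DOUBLE, scale );
        int n = dset.getNumAttrs();                   // now at least 1
    }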
      -
      -
      -// Class PredType contains all the predefined datatype objects that are
      -// currently available.
      -class PredType : public AtomType
      -
      -	static const PredType STD_I8BE;
      -	static const PredType STD_I8LE;
      -	static const PredType STD_I16BE;
      -	static const PredType STD_I16LE;
      -	static const PredType STD_I32BE;
      -	static const PredType STD_I32LE;
      -	static const PredType STD_I64BE;
      -	static const PredType STD_I64LE;
      -	static const PredType STD_U8BE;
      -	static const PredType STD_U8LE;
      -	static const PredType STD_U16BE;
      -	static const PredType STD_U16LE;
      -	static const PredType STD_U32BE;
      -	static const PredType STD_U32LE;
      -	static const PredType STD_U64BE;
      -	static const PredType STD_U64LE;
      -	static const PredType STD_B8BE;
      -	static const PredType STD_B8LE;
      -	static const PredType STD_B16BE;
      -	static const PredType STD_B16LE;
      -	static const PredType STD_B32BE;
      -	static const PredType STD_B32LE;
      -	static const PredType STD_B64BE;
      -	static const PredType STD_B64LE;
      -	static const PredType STD_REF_OBJ;
      -	static const PredType STD_REF_DSETREG;
      -
      -	static const PredType C_S1;
      -	static const PredType FORTRAN_S1;
      -
      -	static const PredType IEEE_F32BE;
      -	static const PredType IEEE_F32LE;
      -	static const PredType IEEE_F64BE;
      -	static const PredType IEEE_F64LE;
      -
      -	static const PredType UNIX_D32BE;
      -	static const PredType UNIX_D32LE;
      -	static const PredType UNIX_D64BE;
      -	static const PredType UNIX_D64LE;
      -
      -	static const PredType INTEL_I8;
      -	static const PredType INTEL_I16;
      -	static const PredType INTEL_I32;
      -	static const PredType INTEL_I64;
      -	static const PredType INTEL_U8;
      -	static const PredType INTEL_U16;
      -	static const PredType INTEL_U32;
      -	static const PredType INTEL_U64;
      -	static const PredType INTEL_B8;
      -	static const PredType INTEL_B16;
      -	static const PredType INTEL_B32;
      -	static const PredType INTEL_B64;
      -	static const PredType INTEL_F32;
      -	static const PredType INTEL_F64;
      -
      -	static const PredType ALPHA_I8;
      -	static const PredType ALPHA_I16;
      -	static const PredType ALPHA_I32;
      -	static const PredType ALPHA_I64;
      -	static const PredType ALPHA_U8;
      -	static const PredType ALPHA_U16;
      -	static const PredType ALPHA_U32;
      -	static const PredType ALPHA_U64;
      -	static const PredType ALPHA_B8;
      -	static const PredType ALPHA_B16;
      -	static const PredType ALPHA_B32;
      -	static const PredType ALPHA_B64;
      -	static const PredType ALPHA_F32;
      -	static const PredType ALPHA_F64;
      -
      -	static const PredType MIPS_I8;
      -	static const PredType MIPS_I16;
      -	static const PredType MIPS_I32;
      -	static const PredType MIPS_I64;
      -	static const PredType MIPS_U8;
      -	static const PredType MIPS_U16;
      -	static const PredType MIPS_U32;
      -	static const PredType MIPS_U64;
      -	static const PredType MIPS_B8;
      -	static const PredType MIPS_B16;
      -	static const PredType MIPS_B32;
      -	static const PredType MIPS_B64;
      -	static const PredType MIPS_F32;
      -	static const PredType MIPS_F64;
      -
      -	static const PredType NATIVE_CHAR;
      -	static const PredType NATIVE_SCHAR;
      -	static const PredType NATIVE_UCHAR;
      -	static const PredType NATIVE_SHORT;
      -	static const PredType NATIVE_USHORT;
      -	static const PredType NATIVE_INT;
      -	static const PredType NATIVE_UINT;
      -	static const PredType NATIVE_LONG;
      -	static const PredType NATIVE_ULONG;
      -	static const PredType NATIVE_LLONG;
      -	static const PredType NATIVE_ULLONG;
      -	static const PredType NATIVE_FLOAT;
      -	static const PredType NATIVE_DOUBLE;
      -	static const PredType NATIVE_LDOUBLE;
      -	static const PredType NATIVE_B8;
      -	static const PredType NATIVE_B16;
      -	static const PredType NATIVE_B32;
      -	static const PredType NATIVE_B64;
      -	static const PredType NATIVE_OPAQUE;
      -	static const PredType NATIVE_HSIZE;
      -	static const PredType NATIVE_HSSIZE;
      -	static const PredType NATIVE_HERR;
      -	static const PredType NATIVE_HBOOL;
      -
      -	static const PredType NATIVE_INT8;
      -	static const PredType NATIVE_UINT8;
      -	static const PredType NATIVE_INT_LEAST8;
      -	static const PredType NATIVE_UINT_LEAST8;
      -	static const PredType NATIVE_INT_FAST8;
      -	static const PredType NATIVE_UINT_FAST8;
      -
      -	static const PredType NATIVE_INT16;
      -	static const PredType NATIVE_UINT16;
      -	static const PredType NATIVE_INT_LEAST16;
      -	static const PredType NATIVE_UINT_LEAST16;
      -	static const PredType NATIVE_INT_FAST16;
      -	static const PredType NATIVE_UINT_FAST16;
      -
      -	static const PredType NATIVE_INT32;
      -	static const PredType NATIVE_UINT32;
      -	static const PredType NATIVE_INT_LEAST32;
      -	static const PredType NATIVE_UINT_LEAST32;
      -	static const PredType NATIVE_INT_FAST32;
      -	static const PredType NATIVE_UINT_FAST32;
      -
      -	static const PredType NATIVE_INT64;
      -	static const PredType NATIVE_UINT64;
      -	static const PredType NATIVE_INT_LEAST64;
      -	static const PredType NATIVE_UINT_LEAST64;
      -	static const PredType NATIVE_INT_FAST64;
      -	static const PredType NATIVE_UINT_FAST64;
      -
      -	// Copy constructor
      -	PredType( const PredType& original );
      -
      -	// Default destructor
      -	virtual ~PredType();
      -
      -   protected:
      -	// Default constructor
      -	PredType();
      -
      -	// Creates a pre-defined type using an HDF5 pre-defined constant
      -	PredType( const hid_t predtype_id );  // used by the library only
      -
      -// end of class PredType
      -
      -
      -// An HDF5 property list can be a file creation property list, a file
      -// access property list, a dataset creation property list, or a dataset
      -// memory and transfer property list.  
      -//
      -// Class PropList provides accesses to an HDF5 property list.  Its
      -// services are inherited by classes FileCreatPropList, FileAccPropList,
      -// DSetCreatPropList, and DSetMemXferPropList.  It also inherits the HDF5
      -// id management from the class IdComponent.
      -class PropList : public IdComponent
      -
      -	// Default property list object
      -        static const PropList DEFAULT;
      -
      -	// Creates a property list given the property list type.
      -	PropList( H5P_class_t type );
      -
      -	// Makes a copy of the given property list.
      -	void copy( const PropList& like_plist );
      -
      -	// Gets the class of this property list, i.e. H5P_FILE_CREATE,
      -	// H5P_FILE_ACCESS, ...
      -	H5P_class_t getClass() const;
      -
      -	// Default constructor
      -	PropList();
      -
      -	// Copy constructor
      -	PropList( const PropList& original );
      -
      -	// Creates a default property list or creates a copy of an 
      -	// existing property list giving the property list id
      -	PropList( const hid_t plist_id );
      -
      -	virtual ~PropList();
      -
      -// end of class PropList
      -
      -// Class RefCounter provides a reference counting mechanism.  It is used
      -// mainly by IdComponent to keep track of the references to an HDF5 object 
      -// identifier.
      -class RefCounter
      -
      -	// Returns the value of the counter.
      -        int getCounter () const;
      -
      -	// Increments and decrements the counter.
      -        void increment();
      -        void decrement();
      -
-	// This bool function is used to determine whether to close an
-	// HDF5 object when there are no more references to that object.
-	// It decrements the counter, then returns true if no other
-	// objects reference the associated identifier.  When the
-	// function returns true, the associated identifier can be
-	// closed safely.
      -	bool noReference();
      -
      -	// Default constructor
      -	RefCounter();
      -
      -	~RefCounter();
      -
      -// end of class RefCounter
      -
      -
      -// Class StrType inherits from AtomType and provides accesses to a 
      -// string datatype.
      -class StrType : public AtomType
      -   public:
      -	// Creates a string type using a predefined type.
      -	StrType( const PredType& pred_type );
      -
      -        // Gets the string datatype of the specified dataset.
      -	StrType( const DataSet& dataset );
      -
      -	// Returns the character set type of this string datatype. 
      -	H5T_cset_t getCset() const;
      -
      -	// Sets character set to be used. 
      -	void setCset( H5T_cset_t cset ) const;
      -
      -	// Returns the string padding method for this string datatype. 
      -	H5T_str_t getStrpad() const;
      -
      -	// Defines the storage mechanism for character strings. 
      -	void setStrpad( H5T_str_t strpad ) const;
      -
      -	// Default constructor 
      -	StrType();
      -
      -	// Copy constructor
      -	StrType( const StrType& original );
      -
      -	// Creates a string datatype using an existing id.
      -	StrType( const hid_t existing_id );
      -
      -	virtual ~StrType();
      -// end of class StrType
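
For illustration, a minimal sketch of configuring a string datatype; H5T_CSET_ASCII and H5T_STR_NULLTERM are the C library's character-set and string-padding constants, and the H5Cpp.h header and H5 namespace are assumed:

    #include "H5Cpp.h"
    using namespace H5;

    StrType make_c_string_type()
    {
        StrType stype( PredType::C_S1 );        // one-byte C-style string base type
        stype.setCset( H5T_CSET_ASCII );        // ASCII character set
        stype.setStrpad( H5T_STR_NULLTERM );    // NUL-terminated storage
        return stype;
    }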
      -
      -
      -// This template function, resetIdComponent, is used to reset an 
      -// IdComponent object, which includes closing the associated HDF5 
      -// identifier if it has no other references.
      -// 'Type' can be of the following classes: Attribute, DataSet, DataSpace,
      -// DataType, H5File, Group, and PropList.
-template <class Type>
-void resetIdComponent(
-	Type* obj );	// pointer to object to be reset
      -
      -
-
-
-HDF Help Desk
-
-Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0
-
-Last modified: 17 December 2000
-
-
-
-
diff --git a/doc/html/cpplus/CppUserNotes.doc b/doc/html/cpplus/CppUserNotes.doc
deleted file mode 100644
index c14d3d6..0000000
Binary files a/doc/html/cpplus/CppUserNotes.doc and /dev/null differ
diff --git a/doc/html/cpplus/CppUserNotes.pdf b/doc/html/cpplus/CppUserNotes.pdf
deleted file mode 100644
index 7d0064f..0000000
Binary files a/doc/html/cpplus/CppUserNotes.pdf and /dev/null differ
diff --git a/doc/html/cpplus/Makefile.am b/doc/html/cpplus/Makefile.am
deleted file mode 100644
index 81af45e..0000000
--- a/doc/html/cpplus/Makefile.am
+++ /dev/null
@@ -1,17 +0,0 @@
-# HDF5 Library Doc Makefile(.in)
-#
-# Copyright (C) 1997, 2002
-#     National Center for Supercomputing Applications.
-#     All rights reserved.
-#
-##
-## Makefile.am
-## Run automake to generate a Makefile.in from this file.
-#
-
-include $(top_srcdir)/config/commence-doc.am
-
-localdocdir = $(docdir)/hdf5/cpplus
-
-# Public doc files (to be installed)...
-localdoc_DATA=CppInterfaces.html CppUserNotes.doc CppUserNotes.pdf
diff --git a/doc/html/cpplus/Makefile.in b/doc/html/cpplus/Makefile.in
deleted file mode 100644
index 434d2d7..0000000
--- a/doc/html/cpplus/Makefile.in
+++ /dev/null
@@ -1,485 +0,0 @@
-# Makefile.in generated by automake 1.9.5 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005  Free Software Foundation, Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-# HDF5 Library Doc Makefile(.in)
-#
-# Copyright (C) 1997, 2002
-#     National Center for Supercomputing Applications.
-#     All rights reserved.
-#
-#
-
-srcdir = @srcdir@
-top_srcdir = @top_srcdir@
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-top_builddir = ../../..
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/cpplus -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. -ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ 
-F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our 
tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/cpplus - -# Public doc files (to be installed)... -localdoc_DATA = CppInterfaces.html CppUserNotes.doc CppUserNotes.pdf -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/cpplus/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/cpplus/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/dataset_p1.gif b/doc/html/dataset_p1.gif deleted file mode 100644 index 1e7cea0..0000000 Binary files a/doc/html/dataset_p1.gif and /dev/null differ diff --git a/doc/html/dataset_p1.obj b/doc/html/dataset_p1.obj deleted file mode 100644 index 42d66fc..0000000 --- a/doc/html/dataset_p1.obj +++ /dev/null @@ -1,32 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,16,1,9,1,1,0,0,1,0,1,1,'Helvetica',0,24,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',128,240,288,432,4,1,1,26,0,0,0,0,0,'1',[ -]). -box('black',400,272,464,400,4,1,1,27,0,0,0,0,0,'1',[ -]). -box('black',192,304,224,368,6,1,1,28,0,0,0,0,0,'1',[ -]). -box('black',400,272,432,336,6,1,1,29,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 224,304,400,272],1,1,1,32,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 224,368,400,336],1,1,1,33,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',208,208,'Helvetica',0,20,1,1,0,1,77,17,40,0,14,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File Dataset"]). -text('black',432,208,'Helvetica',0,20,1,1,0,1,106,17,42,0,14,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Memory Dataset"]). -text('black',320,144,'Helvetica',0,24,1,1,0,1,206,29,68,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Raw Data Transfer"]). -box('black',96,128,512,464,0,1,1,70,0,0,0,0,0,'1',[ -]). diff --git a/doc/html/ddl.html b/doc/html/ddl.html deleted file mode 100644 index fb0596e..0000000 --- a/doc/html/ddl.html +++ /dev/null @@ -1,579 +0,0 @@ - - - - DDL for HDF5 - - - - - - - - - - -
      -
      - - - -
      - HDF5 documents and links 
      - Introduction to HDF5 
      - HDF5 Reference Manual 
      - HDF5 User's Guide for Release 1.6 
      - -
      - And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
      - Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
      - References   - Attributes   - Property Lists   - Error Handling   -
      - Filters   - Caching   - Chunking   - Mounting Files   -
      - Performance   - Debugging   - Environment   - DDL   -
      -
      -
      -

      DDL in BNF for HDF5

      - - -

      1. Introduction

      - -This document contains the data description language (DDL) for an HDF5 file. -The description is in Backus-Naur Form. - -

      2. Explanation of Symbols

      - -This section contains a brief explanation of the symbols used in the DDL. - -
      -    ::=                      defined as
      -    <tname>                  a token with the name tname
      -    <a> | <b>                one of <a> or <b>
      -    <a>opt                    zero or one occurrence of <a>
      -    <a>*                     zero or more occurrences of <a>
      -    <a>+                     one or more occurrences of <a>
      -    [0-9]                    an element in the range between 0 and 9
      -    `['                      the token within the quotes (used for special characters)
      -    TBD                      To Be Decided
      -
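      As an illustration of this notation, the productions in the next section
      define <simple_space> as SIMPLE { <current_dims> / <max_dims> } and
      <max_dim> as <int_value> | H5S_UNLIMITED, so (with dimension values
      invented purely for the example) one possible instance is:

          SIMPLE { ( 10, 10 ) / ( 10, H5S_UNLIMITED ) }

      Here the second maximum dimension is unlimited, so the dataset can be
      extended along that axis.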
      - -

      3. The DDL

      - - -
      -<file> ::= HDF5 <file_name> { <file_super_block>opt <root_group> }
      -
      -<file_name> ::= <identifier>
      -
      -<file_super_block> ::= BOOT_BLOCK { <super_block_content> }
      -
      -<super_block_content> ::= TBD
      -
      -<root_group> ::= GROUP "/" {
      -                           <unnamed_datatype>*
      -                           <object_id>opt
      -                           <group_comment>opt
      -                           <group_attribute>*
      -                           <group_member>*
      -                       }
      -
      -<datatype> ::= <atomic_type> | <compound_type> | <variable_length_type> | <array_type>
      -
      -<unnamed_datatype> ::= DATATYPE <unnamed_type_name> { <datatype> }
      -
      -<unnamed_type_name> ::= the assigned name for an unnamed type is of the form
      -                        #oid, where oid is the object id of the type
      -
      -<atomic_type> ::= <integer>  | <float>  | <time>      | <string> |
      -                  <bitfield> | <opaque> | <reference> | <enum>
      -
      -<integer> ::=  H5T_STD_I8BE     | H5T_STD_I8LE      |
      -               H5T_STD_I16BE    | H5T_STD_I16LE     |
      -               H5T_STD_I32BE    | H5T_STD_I32LE     |
      -               H5T_STD_I64BE    | H5T_STD_I64LE     |
      -               H5T_STD_U8BE     | H5T_STD_U8LE      |
      -               H5T_STD_U16BE    | H5T_STD_U16LE     |
      -               H5T_STD_U32BE    | H5T_STD_U32LE     |
      -               H5T_STD_U64BE    | H5T_STD_U64LE     |
      -               H5T_NATIVE_CHAR  | H5T_NATIVE_UCHAR  |
      -               H5T_NATIVE_SHORT | H5T_NATIVE_USHORT |
      -               H5T_NATIVE_INT   | H5T_NATIVE_UINT   |
      -               H5T_NATIVE_LONG  | H5T_NATIVE_ULONG  |
      -               H5T_NATIVE_LLONG | H5T_NATIVE_ULLONG
      -
      -<float> ::= H5T_IEEE_F32BE   | H5T_IEEE_F32LE     |
      -            H5T_IEEE_F64BE   | H5T_IEEE_F64LE     |
      -            H5T_NATIVE_FLOAT |  H5T_NATIVE_DOUBLE |
      -            H5T_NATIVE_LDOUBLE
      -
      -<time> ::= TBD
      -
      -<string> ::= H5T_STRING { STRSIZE <strsize> ;
      -               STRPAD <strpad> ;
      -               CSET <cset> ;
      -               CTYPE <ctype> ; }  
      -
      -<strsize> ::= <int_value>
      -
      -<strpad> ::= H5T_STR_NULLTERM | H5T_STR_NULLPAD | H5T_STR_SPACEPAD
      -
      -<cset> ::= H5T_CSET_ASCII
      -
      -<ctype> ::= H5T_C_S1 | H5T_FORTRAN_S1
      -
      -<bitfield> ::= TBD
      -
      -<opaque> ::= H5T_OPAQUE { <identifier> }
      -
      -<reference> ::= H5T_REFERENCE { <ref_type> }
      -
      -<ref_type> ::= H5T_STD_REF_OBJECT | H5T_STD_REF_DSETREG
      -
      -<compound_type> ::= H5T_COMPOUND { <member_type_def>+ }
      -
      -<member_type_def> ::= <datatype> <field_name> ;
      -
      -<field_name> ::= <identifier>
      -
      -<variable_length_type> ::= H5T_VLEN { <datatype> }
      -
      -<array_type> ::= H5T_ARRAY { <dim_sizes> <datatype> }
      -
      -<dim_sizes> ::= `['<dimsize>`]' | `['<dimsize>`]'<dim_sizes>
      -
      -<dimsize> ::= <int_value>
      -
      -<attribute> ::= ATTRIBUTE <attr_name> { <dataset_type>    
      -                                        <dataset_space>
      -                                        <data>opt  } 
      -
      -<attr_name> ::= <identifier>
      -
      -<dataset_type> ::= DATATYPE <path_name> | <datatype>
      -
      -<enum> ::= H5T_ENUM { <enum_base_type> <enum_def>+  }
      -
      -<enum_base_type> ::= <integer>
      -// Currently enums can only hold integer type data, but they may be expanded
      -// in the future to hold any datatype
      -
      -<enum_def> ::= <enum_symbol> <enum_val>;
      -
      -<enum_symbol> ::= <identifier>
      -
      -<enum_val> ::= <int_value>
      -
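      For example (a hypothetical illustration; the symbol names and values are
      invented, and actual h5dump output may format the base type and whitespace
      slightly differently), these productions admit an enumeration such as:

          H5T_ENUM {
             H5T_STD_I32LE
             "RED"   0;
             "GREEN" 1;
             "BLUE"  2;
          }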
      -<path_name> ::= <path_part>+
      -
      -<path_part> ::= /<identifier>
      -
      -<dataspace> ::= <scalar_space> | <simple_space> | <complex_space>
      -
      -<scalar_space> ::= SCALAR
      -
      -<simple_space> ::= SIMPLE { <current_dims> / <max_dims> }
      -
      -<complex_space> ::= COMPLEX { <complex_space_definition> }
      -
      -<dataset_space> ::= DATASPACE <path_name> | <dataspace>
      -
      -<current_dims> ::= <dims>
      -
      -<max_dims> ::= `(' <max_dim_list> `)'
      -
      -<max_dim_list> ::= <max_dim> | <max_dim>, <max_dim_list>
      -
      -<max_dim> ::= <int_value> | H5S_UNLIMITED
      -
      -<complex_space_definition> ::= TBD
      -
      -<data> ::= DATA { <scalar_space_data> | <simple_space_data> | <complex_space_data> } | <subset>
      -
      -<scalar_space_data> ::= <any_element>
      -
      -<any_element> ::= <atomic_element> | <compound_element> | 
      -                  <variable_length_element> | <array_element>
      -
      -<any_data_seq> ::= <any_element> | <any_element>, <any_data_seq>
      -
      -<atomic_element> ::=  <integer_data> | <float_data>    | <time_data>   |
      -                      <string_data>  | <bitfield_data> | <opaque_data> |
      -                      <enum_data>    | <reference_data>
      -
      -<subset> ::= SUBSET { <start>;
      -                      <stride>;
      -                      <count>;
      -                      <block>;
      -                 DATA { <simple_space_data> }
      -             }
      -
      -<start> ::= START (<coor_list>)
      -
      -<stride> ::= STRIDE (<pos_list>)
      -
      -<count> ::= COUNT (<coor_list>)
      -
      -<block> ::= BLOCK (<coor_list>)
      -
      -<coor_list> ::= <int_value>, <coor_list> | <int_value>
      -
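      Assembled from the <subset>, <start>, <stride>, <count>, and <block>
      productions above, a hyperslab selection over a two-dimensional dataset
      could be written as follows (all numbers are invented for the illustration):

          SUBSET {
             START ( 0, 0 );
             STRIDE ( 2, 2 );
             COUNT ( 3, 3 );
             BLOCK ( 1, 1 );
             DATA {
                0, 2, 4,
                20, 22, 24,
                40, 42, 44
             }
          }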
      -<integer_data> ::= <int_value>
      -
      -<float_data> ::= a floating point number
      -
      -<time_data> ::= TBD
      -
      -<string_data> ::= a string
      -// A string is enclosed in double quotes. 
      -// If a string is displayed on more than one line, the string
      -// concatenation operator '//' is used.
      -
      -<bitfield_data> ::= TBD
      -
      -<opaque_data> ::= TBD
      -
      -<enum_data> ::= <enum_symbol>
      -
      -<reference_data> ::= <object_ref_data> | <data_region_data> | NULL
      -
      -<object_ref_data> ::= <object_type> <object_num>
      -
      -<object_type> ::= DATASET | GROUP | DATATYPE
      -
      -<object_id> ::= OBJECTID { <object_num> }
      -
      -<object_num> ::= <int_value>:<int_value> | <int_value>
      -
      -<data_region_data> ::= H5T_STD_REF_DSETREG <object_num> { <data_region_data_list> }
      -
      -<data_region_data_list> ::= <data_region_data_info>, <data_region_data_list> | <data_region_data_info>
      -
      -<data_region_data_info> ::= <region_info> | <point_info>
      -
      -<region_info> ::= (<region_vals>)
      -
      -<region_vals> ::= <lower_bound>:<upper_bound>, <region_vals> | <lower_bound>:<upper_bound>
      -
      -<lower_bound> ::= <int_value>
      -
      -<upper_bound> ::= <int_value>
      -
      -<point_info> ::= (<point_vals>)
      -
      -<point_vals> ::= <int_value> | <int_value>, <point_vals>
      -
      -<compound_element> ::= { <any_data_seq> }
      -
      -<atomic_simple_data> ::= <atomic_element>, <atomic_simple_data> | <atomic_element>
      -
      -<simple_space_data> ::= <any_data_seq>
      -
      -<variable_length_element> ::= ( <any_data_seq> )
      -
      -<array_element> ::= `[' <any_data_seq> `]'
      -
      -<complex_space_data> ::= TBD
      -
      -<named_datatype> ::= DATATYPE <type_name> { <datatype> }
      -
      -<type_name> ::= <identifier>
      -
      -<named_dataspace> ::= TBD
      -
      -<hardlink> ::= HARDLINK <path_name> 
      -
      -<group> ::= GROUP <group_name> { <hardlink> | <group_info> }
      -
      -<group_comment> ::= COMMENT <string_data>
      -            
      -<group_name> ::= <identifier>
      -
      -<group_info> ::= <object_id>opt <group_comment>opt <group_attribute>* <group_member>* 
      -            
      -<group_attribute> ::= <attribute> 
      -
      -<group_member> ::= <named_datatype> | <named_dataspace> | <group> |
      -                   <dataset> | <softlink>
      -
      -<dataset> ::= DATASET <dataset_name> { <hardlink> | <dataset_info> }
      -
      -<dataset_info> ::= <dataset_type>  <dataset_space> <storagelayout>opt
      -                   <compression>opt <dataset_attribute>* <object_id>opt
      -                   <data>opt
      -// The tokens above can appear in any order, as long as <data> comes
      -// after <dataset_type> and <dataset_space>.
      -
      -<dataset_name> ::= <identifier>
      -
      -<storagelayout> ::=  STORAGELAYOUT <contiguous_layout>  |  
      -                     STORAGELAYOUT <chunked_layout>     |
      -                     STORAGELAYOUT <compact_layout>     |
      -                     STORAGELAYOUT <external_layout> 
      -
      -<contiguous_layout> ::= {CONTIGUOUS}    // default
      -
      -<chunked_layout> ::=  {CHUNKED <dims> }
      -
      -<dims> ::= (<dims_values>)
      -
      -<dims_values> ::= <int_value> | <int_value>, <dims_values>
      -
      -<compact_layout> ::= TBD           
      -
      -<external_layout> ::= {EXTERNAL <external_file>+ }
      -
      -<external_file> ::= (<file_name> <offset> <size>) 
      -
      -<offset> ::= <int_value>
      -
      -<size> ::= <int_value>
      -
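      For example, the <storagelayout> productions above allow declarations such
      as the following (the chunk dimensions, file names, offsets, and sizes are
      invented for the illustration):

          STORAGELAYOUT {CONTIGUOUS}
          STORAGELAYOUT {CHUNKED ( 100, 100 )}
          STORAGELAYOUT {EXTERNAL ("raw1.data" 0 40000) ("raw2.data" 0 40000)}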
      -<compression> ::= COMPRESSION { TBD }  
      -
      -<dataset_attribute> ::= <attribute> 
      -
      -<softlink> ::= SOFTLINK <softlink_name> { LINKTARGET <target> }
      -
      -<softlink_name> ::= <identifier>
      -
      -<target> ::= <identifier>
      -
      -<identifier> ::= a string
      -// The character '/' should be used with care. 
      -
      -<pos_list>  ::= <pos_int>, <pos_list> | <pos_int>
      -
      -<int_value> ::= 0 | <pos_int>
      -
      -<pos_int>   ::= [1-9][0-9]*
      -
      -
      -
      - - -

      4. An Example of an HDF5 File in DDL

      - - -
      -HDF5 "example.h5" {
      -GROUP "/" {
      -   ATTRIBUTE "attr1" {
      -      DATATYPE H5T_STRING { 
      -           STRSIZE 17;
      -           STRPAD H5T_STR_NULLTERM;
      -           CSET H5T_CSET_ASCII;
      -           CTYPE H5T_C_S1;
      -         }
      -      DATASPACE SCALAR 
      -      DATA {
      -         "string attribute"
      -      }
      -   }
      -   DATASET "dset1" {
      -      DATATYPE H5T_STD_I32BE
      -      DATASPACE SIMPLE { ( 10, 10 ) / ( 10, 10 ) }
      -      DATA {
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
      -         0, 1, 2, 3, 4, 5, 6, 7, 8, 9
      -      }
      -   }
      -   DATASET "dset2" {
      -      DATATYPE H5T_COMPOUND {
      -         H5T_STD_I32BE "a";
      -         H5T_IEEE_F32BE "b";
      -         H5T_IEEE_F64BE "c";
      -      }
      -      DATASPACE SIMPLE { ( 5 ) / ( 5 ) }
      -      DATA {
      -         {
      -            1,
      -            0.1,
      -            0.01
      -         },
      -         {
      -            2,
      -            0.2,
      -            0.02
      -         },
      -         {
      -            3,
      -            0.3,
      -            0.03
      -         },
      -         {
      -            4,
      -            0.4,
      -            0.04
      -         },
      -         {
      -            5,
      -            0.5,
      -            0.05
      -         }
      -      }
      -   }
      -   GROUP "group1" {
      -      COMMENT "This is a comment for group1";
      -      DATASET "dset3" {
      -         DATATYPE "/type1"
      -         DATASPACE SIMPLE { ( 5 ) / ( 5 ) }
      -         DATA {
      -            {
      -               [ 0, 1, 2, 3 ],
      -               [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
      -                 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
      -                 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
      -                 0.4, 0.4, 0.4, 0.4, 0.4, 0.4,
      -                 0.5, 0.5, 0.5, 0.5, 0.5, 0.5 ]
      -            },
      -            {
      -               [ 0, 1, 2, 3 ],
      -               [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
      -                 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
      -                 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
      -                 0.4, 0.4, 0.4, 0.4, 0.4, 0.4,
      -                 0.5, 0.5, 0.5, 0.5, 0.5, 0.5 ]
      -            },
      -            {
      -               [ 0, 1, 2, 3 ],
      -               [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
      -                 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
      -                 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
      -                 0.4, 0.4, 0.4, 0.4, 0.4, 0.4,
      -                 0.5, 0.5, 0.5, 0.5, 0.5, 0.5 ]
      -            },
      -            {
      -               [ 0, 1, 2, 3 ],
      -               [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
      -                 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
      -                 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
      -                 0.4, 0.4, 0.4, 0.4, 0.4, 0.4,
      -                 0.5, 0.5, 0.5, 0.5, 0.5, 0.5 ]
      -            },
      -            {
      -               [ 0, 1, 2, 3 ],
      -               [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
      -                 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
      -                 0.3, 0.3, 0.3, 0.3, 0.3, 0.3,
      -                 0.4, 0.4, 0.4, 0.4, 0.4, 0.4,
      -                 0.5, 0.5, 0.5, 0.5, 0.5, 0.5 ]
      -            }
      -         }
      -      }
      -   }
      -   DATASET "dset3" {
      -      DATATYPE H5T_VLEN { H5T_STD_I32LE } 
      -      DATASPACE SIMPLE { ( 4 ) / ( 4 ) } 
      -      DATA {
      -         (0), (10, 11), (20, 21, 22), (30, 31, 32, 33)
      -      } 
      -   }
      -   GROUP "group2" {
      -      HARDLINK "/group1"
      -   }
      -   SOFTLINK "slink1" {
      -      LINKTARGET "somevalue"
      -   }
      -   DATATYPE "type1" H5T_COMPOUND {
      -      H5T_ARRAY { [4] H5T_STD_I32BE } "a";
      -      H5T_ARRAY { [5][6] H5T_IEEE_F32BE } "b";
      -   }
      -}
      -}
      -
      -
      - - -
      -
      - - - -
      - HDF5 documents and links 
      - Introduction to HDF5 
      - HDF5 Reference Manual 
      - HDF5 User's Guide for Release 1.6 
      - -
      - And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
      - Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
      - References   - Attributes   - Property Lists   - Error Handling   -
      - Filters   - Caching   - Chunking   - Mounting Files   -
      - Performance   - Debugging   - Environment   - DDL   -
      -
      -
      -
      -HDF Help Desk -
      -Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
      - - -Last modified: 17 November 2000 - - - - diff --git a/doc/html/ed_libs/Footer.lbi b/doc/html/ed_libs/Footer.lbi deleted file mode 100644 index 8f5031e..0000000 --- a/doc/html/ed_libs/Footer.lbi +++ /dev/null @@ -1,5 +0,0 @@ -
      -HDF Help Desk -
      -Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
      \ No newline at end of file diff --git a/doc/html/ed_libs/Makefile.am b/doc/html/ed_libs/Makefile.am deleted file mode 100644 index 49eb355..0000000 --- a/doc/html/ed_libs/Makefile.am +++ /dev/null @@ -1,20 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir = $(docdir)/hdf5/ed_libs - -# Public doc files (to be installed)... -localdoc_DATA=Footer.lbi NavBar_ADevG.lbi NavBar_Common.lbi NavBar_Intro.lbi \ - NavBar_RM.lbi NavBar_TechN.lbi NavBar_UG.lbi styles_Format.lbi \ - styles_Gen.lbi styles_Index.lbi styles_Intro.lbi styles_RM.lbi \ - styles_UG.lbi diff --git a/doc/html/ed_libs/Makefile.in b/doc/html/ed_libs/Makefile.in deleted file mode 100644 index 31803a5..0000000 --- a/doc/html/ed_libs/Makefile.in +++ /dev/null @@ -1,489 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/ed_libs -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/ed_libs - -# Public doc files (to be installed)... 
-localdoc_DATA = Footer.lbi NavBar_ADevG.lbi NavBar_Common.lbi NavBar_Intro.lbi \ - NavBar_RM.lbi NavBar_TechN.lbi NavBar_UG.lbi styles_Format.lbi \ - styles_Gen.lbi styles_Index.lbi styles_Intro.lbi styles_RM.lbi \ - styles_UG.lbi - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/ed_libs/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/ed_libs/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am 
-install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/ed_libs/NavBar_ADevG.lbi b/doc/html/ed_libs/NavBar_ADevG.lbi deleted file mode 100644 index 2178e91..0000000 --- a/doc/html/ed_libs/NavBar_ADevG.lbi +++ /dev/null @@ -1,18 +0,0 @@ -
      -
      - - - -
      - HDF5 documents and links 
      - Introduction to HDF5 
      - -
      - HDF5 User's Guide 
      - HDF5 Reference Manual 
      - HDF5 Application Developer's Guide 
      -
      -
      -
      diff --git a/doc/html/ed_libs/NavBar_Common.lbi b/doc/html/ed_libs/NavBar_Common.lbi deleted file mode 100644 index 47d2bbd..0000000 --- a/doc/html/ed_libs/NavBar_Common.lbi +++ /dev/null @@ -1,17 +0,0 @@ -
      -
      - - - -
      - HDF5 documents and links 
      - Introduction to HDF5 
      - -
      - HDF5 User's Guide 
      - HDF5 Reference Manual 
      -
      -
      -
      diff --git a/doc/html/ed_libs/NavBar_Intro.lbi b/doc/html/ed_libs/NavBar_Intro.lbi deleted file mode 100644 index 81d035b..0000000 --- a/doc/html/ed_libs/NavBar_Intro.lbi +++ /dev/null @@ -1,17 +0,0 @@ -
      -
      - - - -
      -Introduction to HDF5 
      -HDF5 User Guide  - -
      -HDF5 Reference Manual 
      -Other HDF5 documents and links  -
      -
      -
      diff --git a/doc/html/ed_libs/NavBar_RM.lbi b/doc/html/ed_libs/NavBar_RM.lbi deleted file mode 100644 index 391a806..0000000 --- a/doc/html/ed_libs/NavBar_RM.lbi +++ /dev/null @@ -1,39 +0,0 @@ -
      -
      - - - -
      -HDF5 documents and links 
      -Introduction to HDF5 
      -HDF5 User Guide 
      - -
      -And in this document, the -HDF5 Reference Manual   -
      -H5IM   -H5LT   -H5PT   -H5TB   -
      -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5I   -H5P   -
      -H5R   -H5S   -H5T   -H5Z   -Tools   -Datatypes   -
      -
      -
      \ No newline at end of file diff --git a/doc/html/ed_libs/NavBar_TechN.lbi b/doc/html/ed_libs/NavBar_TechN.lbi deleted file mode 100644 index 99b4b4f..0000000 --- a/doc/html/ed_libs/NavBar_TechN.lbi +++ /dev/null @@ -1,27 +0,0 @@ -
      -
      - - - - -
      -HDF5 documents and links 
      -Introduction to HDF5 
      - -
      -HDF5 User's Guide 
      -HDF5 Application Developer's Guide 
      -HDF5 Reference Manual 
      - - - -
      -
      -
      diff --git a/doc/html/ed_libs/NavBar_UG.lbi b/doc/html/ed_libs/NavBar_UG.lbi deleted file mode 100644 index f6de063..0000000 --- a/doc/html/ed_libs/NavBar_UG.lbi +++ /dev/null @@ -1,40 +0,0 @@ -
      -
      - - - -
      - HDF5 documents and links 
      - Introduction to HDF5 
      - HDF5 Reference Manual 
      - HDF5 User's Guide for Release 1.6 
      - -
      - And in this document, the - HDF5 User's Guide from Release 1.4.5:     -
      - Files   - Datasets   - Datatypes   - Dataspaces   - Groups   -
      - References   - Attributes   - Property Lists   - Error Handling   -
      - Filters   - Caching   - Chunking   - Mounting Files   -
      - Performance   - Debugging   - Environment   - DDL   -
      -
      -
      diff --git a/doc/html/ed_libs/styles_Format.lbi b/doc/html/ed_libs/styles_Format.lbi deleted file mode 100644 index f979cf0..0000000 --- a/doc/html/ed_libs/styles_Format.lbi +++ /dev/null @@ -1,18 +0,0 @@ - - - - diff --git a/doc/html/ed_libs/styles_Gen.lbi b/doc/html/ed_libs/styles_Gen.lbi deleted file mode 100644 index 26935f2..0000000 --- a/doc/html/ed_libs/styles_Gen.lbi +++ /dev/null @@ -1,18 +0,0 @@ - - - - diff --git a/doc/html/ed_libs/styles_Index.lbi b/doc/html/ed_libs/styles_Index.lbi deleted file mode 100644 index 25ecd90..0000000 --- a/doc/html/ed_libs/styles_Index.lbi +++ /dev/null @@ -1,18 +0,0 @@ - - - - diff --git a/doc/html/ed_libs/styles_Intro.lbi b/doc/html/ed_libs/styles_Intro.lbi deleted file mode 100644 index 08547c3..0000000 --- a/doc/html/ed_libs/styles_Intro.lbi +++ /dev/null @@ -1,18 +0,0 @@ - - - - diff --git a/doc/html/ed_libs/styles_RM.lbi b/doc/html/ed_libs/styles_RM.lbi deleted file mode 100644 index 3dd8eb3..0000000 --- a/doc/html/ed_libs/styles_RM.lbi +++ /dev/null @@ -1,19 +0,0 @@ - - - - - diff --git a/doc/html/ed_libs/styles_UG.lbi b/doc/html/ed_libs/styles_UG.lbi deleted file mode 100644 index a21c739..0000000 --- a/doc/html/ed_libs/styles_UG.lbi +++ /dev/null @@ -1,18 +0,0 @@ - - - - diff --git a/doc/html/ed_styles/FormatElect.css b/doc/html/ed_styles/FormatElect.css deleted file mode 100644 index cd181cd..0000000 --- a/doc/html/ed_styles/FormatElect.css +++ /dev/null @@ -1,35 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/FormatPrint.css b/doc/html/ed_styles/FormatPrint.css deleted file mode 100644 index 6b25a73..0000000 --- a/doc/html/ed_styles/FormatPrint.css +++ /dev/null @@ -1,58 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/GenElect.css b/doc/html/ed_styles/GenElect.css deleted file mode 100644 index cd181cd..0000000 --- a/doc/html/ed_styles/GenElect.css +++ /dev/null @@ -1,35 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/GenPrint.css b/doc/html/ed_styles/GenPrint.css deleted file mode 100644 index 6b25a73..0000000 --- a/doc/html/ed_styles/GenPrint.css +++ /dev/null @@ -1,58 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/IndexElect.css b/doc/html/ed_styles/IndexElect.css deleted file mode 100644 index cd181cd..0000000 --- a/doc/html/ed_styles/IndexElect.css +++ /dev/null @@ -1,35 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/IndexPrint.css b/doc/html/ed_styles/IndexPrint.css deleted file mode 100644 index 6b25a73..0000000 --- a/doc/html/ed_styles/IndexPrint.css +++ /dev/null @@ -1,58 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/IntroElect.css b/doc/html/ed_styles/IntroElect.css deleted file mode 100644 index cd181cd..0000000 --- a/doc/html/ed_styles/IntroElect.css +++ /dev/null @@ -1,35 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/IntroPrint.css b/doc/html/ed_styles/IntroPrint.css deleted file mode 100644 index 6b25a73..0000000 --- a/doc/html/ed_styles/IntroPrint.css +++ /dev/null @@ -1,58 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/Makefile.am b/doc/html/ed_styles/Makefile.am deleted file mode 100644 index a4b86e9..0000000 --- a/doc/html/ed_styles/Makefile.am +++ /dev/null @@ -1,19 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. 
-# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir = $(docdir)/hdf5/ed_styles - -# Public doc files (to be installed)... -localdoc_DATA=FormatElect.css FormatPrint.css GenElect.css GenPrint.css \ - IndexElect.css IndexPrint.css IntroElect.css IntroPrint.css \ - RMelect.css RMprint.css UGelect.css UGprint.css diff --git a/doc/html/ed_styles/Makefile.in b/doc/html/ed_styles/Makefile.in deleted file mode 100644 index 98b1af9..0000000 --- a/doc/html/ed_styles/Makefile.in +++ /dev/null @@ -1,488 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/ed_styles -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/ed_styles - -# Public doc files (to be installed)... 
-localdoc_DATA = FormatElect.css FormatPrint.css GenElect.css GenPrint.css \ - IndexElect.css IndexPrint.css IntroElect.css IntroPrint.css \ - RMelect.css RMprint.css UGelect.css UGprint.css - -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/ed_styles/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/ed_styles/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am 
-install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. 
-.NOEXPORT: diff --git a/doc/html/ed_styles/RMelect.css b/doc/html/ed_styles/RMelect.css deleted file mode 100644 index 478f4e3..0000000 --- a/doc/html/ed_styles/RMelect.css +++ /dev/null @@ -1,39 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/RMprint.css b/doc/html/ed_styles/RMprint.css deleted file mode 100644 index 6b25a73..0000000 --- a/doc/html/ed_styles/RMprint.css +++ /dev/null @@ -1,58 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/UGelect.css b/doc/html/ed_styles/UGelect.css deleted file mode 100644 index cd181cd..0000000 --- a/doc/html/ed_styles/UGelect.css +++ /dev/null @@ -1,35 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/ed_styles/UGprint.css b/doc/html/ed_styles/UGprint.css deleted file mode 100644 index 6b25a73..0000000 --- a/doc/html/ed_styles/UGprint.css +++ /dev/null @@ -1,58 +0,0 @@ - \ No newline at end of file diff --git a/doc/html/extern1.gif b/doc/html/extern1.gif deleted file mode 100644 index dcac681..0000000 Binary files a/doc/html/extern1.gif and /dev/null differ diff --git a/doc/html/extern1.obj b/doc/html/extern1.obj deleted file mode 100644 index 9c56a50..0000000 --- a/doc/html/extern1.obj +++ /dev/null @@ -1,40 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,16,1,9,1,1,0,0,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',128,96,192,128,4,1,1,49,0,0,0,0,0,'1',[ -]). -box('black',192,96,352,128,12,1,1,50,0,0,0,0,0,'1',[ -]). -box('black',352,96,416,128,18,1,1,51,0,0,0,0,0,'1',[ -]). -box('black',64,176,224,208,12,1,1,53,0,0,0,0,0,'1',[ -]). -box('black',256,176,320,208,4,1,1,54,0,0,0,0,0,'1',[ -]). -box('black',352,176,448,208,18,1,1,55,0,0,0,0,0,'1',[ -]). -box('black',224,176,256,208,0,1,1,56,0,0,0,0,0,'1',[ -]). -box('black',320,176,352,208,0,1,1,57,0,0,0,0,0,'1',[ -]). -box('black',448,176,512,208,0,1,1,58,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 176,128,272,176],1,1,1,59,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 240,128,208,176],1,1,1,60,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 384,128,384,176],1,1,1,61,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -box('black',48,80,528,224,0,1,1,64,0,0,0,0,0,'1',[ -]). diff --git a/doc/html/extern2.gif b/doc/html/extern2.gif deleted file mode 100644 index 5f0e942..0000000 Binary files a/doc/html/extern2.gif and /dev/null differ diff --git a/doc/html/extern2.obj b/doc/html/extern2.obj deleted file mode 100644 index 3e83452..0000000 --- a/doc/html/extern2.obj +++ /dev/null @@ -1,108 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,16,1,9,1,1,1,1,0,0,1,1,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',48,48,464,432,0,1,1,144,0,0,0,0,0,'1',[ -]). -text('black',80,240,'Courier',0,17,1,0,0,1,70,14,146,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "scan1.data"]). -text('black',80,304,'Courier',0,17,1,0,0,1,70,14,148,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "scan2.data"]). -text('black',80,368,'Courier',0,17,1,0,0,1,70,14,150,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "scan3.data"]). -polygon('black',7,[ - 64,64,64,128,192,128,192,96,320,96,320,64,64,64],20,1,1,0,181,0,0,0,0,0,'1', - "00",[ -]). -polygon('black',7,[ - 64,128,64,160,320,160,320,96,192,96,192,128,64,128],4,1,1,0,182,0,0,0,0,0,'1', - "00",[ -]). -box('black',64,160,320,192,26,1,1,183,0,0,0,0,0,'1',[ -]). 
-poly('black',2,[ - 80,80,304,80],1,1,1,184,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 80,112,176,112],1,1,1,185,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 208,112,304,112],1,1,1,186,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 80,144,304,144],1,1,1,187,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 80,176,304,176],1,1,1,188,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -box('black',64,256,448,288,20,1,1,203,0,0,0,0,0,'1',[ -]). -box('black',64,320,448,352,4,1,1,216,0,0,0,0,0,'1',[ -]). -box('black',64,384,320,416,26,1,1,225,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 80,272,304,272],1,1,1,226,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 336,272,432,272],1,1,1,227,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 80,336,176,336],1,1,1,228,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 208,336,432,336],1,1,1,229,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 80,400,304,400],1,1,1,230,0,26,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 192,96,64,96],0,1,1,232,0,26,5,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 192,128,320,128],0,1,1,233,0,26,5,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 256,64,256,192],0,1,1,234,0,26,5,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 192,64,192,192],0,1,1,235,0,26,5,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,64,128,192],0,1,1,236,0,26,5,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 320,160,64,160],0,2,1,238,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',4,[ - 320,96,192,96,192,128,64,128],0,2,1,240,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',6,[ - 336,64,384,64,384,128,384,128,384,192,336,192],3,1,1,241,1,0,0,0,8,3,0,0,0,'1','8','3', - "78",[ -]). -text('black',429,124,'Courier',0,17,2,1,0,1,28,49,250,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,1,0,[ - 429,124,405,124,454,152,0,1000,-1000,0,-15,2,404,123,455,153],[ - "2-d", - "Dataset"]). diff --git a/doc/html/fortran/F90Flags.html b/doc/html/fortran/F90Flags.html deleted file mode 100644 index 8619092..0000000 --- a/doc/html/fortran/F90Flags.html +++ /dev/null @@ -1,332 +0,0 @@ - - -HDF5 Fortran90 Flags and Datatypes - - - - - - - - - - - - -
      - -
      -

      HDF5 Fortran90 Flags and Datatypes

      -
      - -
      -
      -
      - -

      Fortran90 Datatypes

      - -The Fortran90 HDF5 datatypes -are listed in HDF5 Predefined Datatypes - -
      -
      -
      - -

      Fortran90 Flags

      - -The Fortran90 HDF5 flags have the same meanings as the C flags defined in the -HDF5 Reference Manual and the -HDF5 User's Guide. - - -

      File access flags

      - - - - -
      -
      -      H5F_ACC_RDWR_F 
      -      H5F_ACC_RDONLY_F
      -      H5F_ACC_TRUNC_F
      -
      -
      -
      -      H5F_ACC_EXCL_F
      -      H5F_ACC_DEBUG_F
      -
      -
      -
      -      H5F_SCOPE_LOCAL_F
      -      H5F_SCOPE_GLOBAL_F
      -
      -
      - - -
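      For illustration, a minimal sketch of passing one of these access flags to
      the Fortran90 file-open routine; the file name and variable names below are
      assumptions for the example, not taken from this page, and the fragment is
      meant to sit inside a program unit that has USE HDF5:

          INTEGER(HID_T) :: file_id    ! file identifier
          INTEGER        :: hdferr     ! error code returned by each call

          ! Open an existing file read-only using a flag from the table above
          CALL h5fopen_f("example.h5", H5F_ACC_RDONLY_F, file_id, hdferr)
          ! ... work with the file ...
          CALL h5fclose_f(file_id, hdferr)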

      Group management flags

      - - - - -
      -
      -      H5G_UNKNOWN_F
      -      H5G_LINK_F
      -      H5G_GROUP_F
      -
      -
      -
      -      H5G_DATASET_F
      -      H5G_TYPE_F
      -      H5G_LINK_ERROR_F
      -
      -
      -
      -      H5G_LINK_HARD_F
      -      H5G_LINK_SOFT_F
      -
      -
      - - -

      Dataset format flags

      - - - - -
      -
      -      H5D_COMPACT_F  
      -
      -
      -
      -      H5D_CONTIGUOUS_F
      -
      -
      -
      -      H5D_CHUNKED_F
      -
      -
      - - -

      MPI IO data transfer flags

      - - - - -
      -
      -      H5FD_MPIO_INDEPENDENT_F 
      -
      -
      -
      -      H5FD_MPIO_COLLECTIVE_F
      -
      -
      -
       
      -      
      -
      -
      - - -

      Error flags

      - - - - -
      -
      -      H5E_NONE_MAJOR_F 
      -      H5E_ARGS_F 
      -      H5E_RESOURCE_F 
      -      H5E_INTERNAL_F 
      -      H5E_FILE_F 
      -      H5E_IO_F 
      -      H5E_FUNC_F 
      -      H5E_ATOM_F 
      -
      -
      -
      -      H5E_CACHE_F 
      -      H5E_BTREE_F 
      -      H5E_SYM_F 
      -      H5E_HEAP_F 
      -      H5E_OHDR_F 
      -      H5E_DATATYPE_F 
      -      H5E_DATASPACE_F 
      -      H5E_DATASET_F 
      -
      -
      -
      -      H5E_STORAGE_F 
      -      H5E_PLIST_F 
      -      H5E_ATTR_F 
      -      H5E_PLINE_F 
      -      H5E_EFL_F 
      -      H5E_REFERENCE_F
      -      H5E_VFL_F 
      -      H5E_TBBT_F 
      -
      -
      - - -

      Object identifier flags

      - - - - -
      -
      -      H5I_FILE_F
      -      H5I_GROUP_F
      -      H5I_DATATYPE_F
      -
      -
      -
      -      H5I_DATASPACE_F
      -      H5I_DATASET_F
      -      H5I_ATTR_F
      -
      -
      -
      -      H5I_BADID_F
      -
      -
      - - -

      Property list flags

      - - - - -
      -
      -      H5P_FILE_CREATE_F 
      -      H5P_FILE_ACCESS_F 
      -
      -
      -
      -      H5P_DATASET_CREATE_F
      -      H5P_DATASET_XFER_F 
      -
      -
      -
      -      H5P_MOUNT_F 
      -      H5P_DEFAULT_F 
      -
      -
      - - -

      Reference pointer flags

      - - - - -
      -
      -      H5R_OBJECT_F
      -
      -
      -
      -      H5R_DATASET_REGION_F
      -
      -
      -
       
      -      
      -
      -
      - -

      Dataspace flags

      - - - - -
      -
      -      H5S_SCALAR_F 
      -      H5S_SIMPLE_F 
      -
      -
      -
      -      H5S_SELECT_SET_F
      -      H5S_SELECT_OR_F
      -
      -
      -
      -      H5S_UNLIMITED_F
      -      H5S_ALL_F
      -
      -
      - - -

      Datatype flags

      - - - - -
      -
      -      H5T_NO_CLASS_F 
      -      H5T_INTEGER_F 
      -      H5T_FLOAT_F  
      -      H5T_TIME_F 
      -      H5T_STRING_F 
      -      H5T_BITFIELD_F
      -      H5T_OPAQUE_F 
      -      H5T_COMPOUND_F 
      -      H5T_REFERENCE_F
      -      H5T_ENUM_F 
      -
      -
      -
      -      H5T_ORDER_LE_F 
      -      H5T_ORDER_BE_F
      -      H5T_ORDER_VAX_F
      -      H5T_PAD_ZERO_F
      -      H5T_PAD_ONE_F
      -      H5T_PAD_BACKGROUND_F
      -      H5T_PAD_ERROR_F    
      -      H5T_SGN_NONE_F   
      -      H5T_SGN_2_F     
      -      H5T_SGN_ERROR_F
      -
      -
      -
      -      H5T_NORM_IMPLIED_F
      -      H5T_NORM_MSBSET_F
      -      H5T_NORM_NONE_F 
      -      H5T_CSET_ASCII_F
      -      H5T_STR_NULLTERM_F 
      -      H5T_STR_NULLPAD_F 
      -      H5T_STR_SPACEPAD_F
      -      H5T_STR_ERROR_F
      -
      -
      - - -
      - - -
      -HDF Help Desk -
      -Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
      - -Last modified: 3 April 2001 - - - diff --git a/doc/html/fortran/F90UserNotes.html b/doc/html/fortran/F90UserNotes.html deleted file mode 100644 index d263cb0..0000000 --- a/doc/html/fortran/F90UserNotes.html +++ /dev/null @@ -1,141 +0,0 @@ - - -HDF5 Fortran90 User's Notes - - - - - - - - - -
      -
      -                          HDF5 Fortran90 User's Notes
      -                          ===========================
      -
      -About the source code organization
      -==================================
      -
      -The Fortran APIs are organized in modules parallel to the HDF5 Interfaces.
      -Each module is in a separate file with the name H5*ff.f.  Corresponding C
      -stubs are in the H5*f.c files.  For example, the Fortran File APIs are in 
      -the file H5Fff.f and the corresponding C stubs are in the file H5Ff.c. 
      -
      -Each module contains Fortran definitions of the constants, interfaces to 
      -the subroutines if needed, and the subroutines themselves.  
      -
      -Users must use constant names in their programs instead of the numerical 
      -values, as the numerical values are subject to change without notice. 
      -
      -About the Fortran APIs 
      -=======================
      -
      -*  The Fortran APIs come in the form of Fortran subroutines.
      -
      -*  Each Fortran subroutine name is derived from the corresponding C function
      -   name by adding "_f" to the name.  For example, the name of the C function 
      -   to create an HDF5 file is H5Fcreate;  the corresponding Fortran subroutine 
      -   is h5fcreate_f. 
      -
      -*  A description of each implemented Fortran subroutine and its parameters 
      -   can be found following the description of the corresponding C function in 
      -   the HDF5 Reference Manual provided with this release.  
      -
      -*  The parameter list for each Fortran subroutine has two more parameters
      -   than the corresponding C function.  These additional parameters hold 
      -   the return value and an error code.  The order of the Fortran subroutine 
      -   parameters may differ from the order of the C function parameters. 
      -   The Fortran subroutine parameters are listed in the following order: 
      -      -- required input parameters,
      -      -- output parameters, including return value and error code, and 
      -      -- optional input parameters.
      -   For example, the C function to create a dataset has the following 
      -   prototype:
      -
      -       hid_t H5Dcreate(hid_t loc_id, char *name, hid_t type_id, 
      -             hid_t space_id, hid_t creation_prp);
      -   
      -   The corresponding Fortran subroutine has the following form:
      -   
      -       SUBROUTINE h5dcreate_f(loc_id, name, type_id, space_id, dset_id, 
      -             hdferr, creation_prp)
      -  
      -   The first four parameters of the Fortran subroutine correspond to the 
      -   C function parameters.  The fifth parameter, dset_id, is an output 
      -   parameter and contains a valid dataset identifier if the value of the 
      -   sixth output parameter hdferr indicates successful completion. 
      -   (Error code descriptions are provided with the subroutine descriptions 
      -   in the Reference Manual.)  The seventh input parameter, creation_prp, 
      -   is optional and may be omitted when the default creation property 
      -   list is used.
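      As a sketch of this parameter ordering (assuming loc_id, space_id, dset_id,
      hdferr, and creation_prp were declared and obtained earlier; the dataset
      names are placeholders), the dataset-creation call can be made with or
      without the optional property list:

          ! Default creation property list: optional last argument omitted
          CALL h5dcreate_f(loc_id, "dset1", H5T_NATIVE_INTEGER, space_id, dset_id, hdferr)

          ! Explicit creation property list passed as the optional last argument
          CALL h5dcreate_f(loc_id, "dset2", H5T_NATIVE_INTEGER, space_id, dset_id, hdferr, creation_prp)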
      -
      -*  Parameters to the Fortran subroutines have one of the following 
      -   predefined datatypes (see the file H5fortran_types.f90 for KIND 
      -   definitions):
      -   
      -        INTEGER(HID_T)      compares with hid_t type in HDF5 C APIs
      -        INTEGER(HSIZE_T)    compares with hsize_t in HDF5 C APIs
      -        INTEGER(HSSIZE_T)   compares with hssize_t in HDF5 C APIs
      -        INTEGER(SIZE_T)     compares with the C size_t type
      -
      -   These integer types usually correspond to 4 or 8 byte integers, 
      -   depending on the FORTRAN90 compiler and the corresponding HDF5 
      -   C library definitions.
      -   
      -   The H5R module defines two types of references: 
      -        TYPE(HOBJ_REF_T_F)      compares to hobj_ref_t in HDF5 C API
      -        TYPE(HDSET_REG_REF_T_F) compares to hdset_reg_ref_t in HDF5 C API
      -
      -*  Each Fortran application must call the h5open_f subroutine to 
      -   initialize the Fortran interface and the HDF5 C Library before calling 
      -   any HDF5 Fortran subroutine. The application must call the h5close_f 
      -   subroutine after all calls to the HDF5 Fortran Library to close the 
      -   Fortran interface and HDF5 C Library. 
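      A minimal program skeleton illustrating this bracketing (the file name is a
      placeholder and error checking is omitted for brevity):

          PROGRAM sketch
            USE HDF5
            IMPLICIT NONE
            INTEGER(HID_T) :: file_id
            INTEGER        :: hdferr

            CALL h5open_f(hdferr)                  ! initialize the Fortran interface
            CALL h5fcreate_f("sketch.h5", H5F_ACC_TRUNC_F, file_id, hdferr)
            ! ... create dataspaces, datasets, groups, attributes, etc. ...
            CALL h5fclose_f(file_id, hdferr)
            CALL h5close_f(hdferr)                 ! close the Fortran interface
          END PROGRAM sketch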
      -
      -*  A list of the predefined datatypes can be found in the HDF5 Reference 
      -   Manual provided with this release.  See HDF5 Predefined Datatypes.
      -
      -*  When a C application reads data stored from a Fortran program, the data 
      -   will appear to be transposed due to the difference in the C and Fortran 
      -   storage orders.  For example, if Fortran writes a 4x6 two-dimensional 
      -   dataset to the file, a C program will read it as a 6x4 two-dimensional 
      -   dataset into memory.  The HDF5 C utilities h5dump and h5ls will also 
      -   display transposed data if the data was written from a Fortran program.
      -
      -*  Fortran indices are 1-based.
      -
      -*  Compound datatype datasets can be written or read by atomic fields only.
      -
      -
      - -
      - - -
      -HDF Help Desk -
      -Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0 -
      - -Last modified: 15 December 2000 - - - - diff --git a/doc/html/fortran/Makefile.am b/doc/html/fortran/Makefile.am deleted file mode 100644 index 3b21a9c..0000000 --- a/doc/html/fortran/Makefile.am +++ /dev/null @@ -1,17 +0,0 @@ -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -## -## Makefile.am -## Run automake to generate a Makefile.in from this file. -# - -include $(top_srcdir)/config/commence-doc.am - -localdocdir = $(docdir)/hdf5/fortran - -# Public doc files (to be installed)... -localdoc_DATA=F90Flags.html F90UserNotes.html diff --git a/doc/html/fortran/Makefile.in b/doc/html/fortran/Makefile.in deleted file mode 100644 index d6e9343..0000000 --- a/doc/html/fortran/Makefile.in +++ /dev/null @@ -1,485 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# HDF5 Library Doc Makefile(.in) -# -# Copyright (C) 1997, 2002 -# National Center for Supercomputing Applications. -# All rights reserved. -# -# - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../../.. -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ - $(top_srcdir)/config/commence-doc.am \ - $(top_srcdir)/config/commence.am -subdir = doc/html/fortran -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.in -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs -CONFIG_HEADER = $(top_builddir)/src/H5config.h -CONFIG_CLEAN_FILES = -SOURCES = -DIST_SOURCES = -am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; -am__vpath_adj = case $$p in \ - $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ - *) f=$$p;; \ - esac; -am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; -am__installdirs = "$(DESTDIR)$(localdocdir)" -localdocDATA_INSTALL = $(INSTALL_DATA) -DATA = $(localdoc_DATA) -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) - -# Set the paths for AFS installs of autotools for Linux machines -# Ideally, these tools should never be needed during the build. 
-ACLOCAL = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/aclocal -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal -ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AM_MAKEFLAGS = @AM_MAKEFLAGS@ -AR = @AR@ -AUTOCONF = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoconf -AUTOHEADER = /afs/ncsa/projects/hdf/packages/autoconf_2.59/Linux_2.4/bin/autoheader -AUTOMAKE = /afs/ncsa/projects/hdf/packages/automake_1.9.5/Linux_2.4/bin/automake -AWK = @AWK@ -BUILD_CXX_CONDITIONAL_FALSE = @BUILD_CXX_CONDITIONAL_FALSE@ -BUILD_CXX_CONDITIONAL_TRUE = @BUILD_CXX_CONDITIONAL_TRUE@ -BUILD_FORTRAN_CONDITIONAL_FALSE = @BUILD_FORTRAN_CONDITIONAL_FALSE@ -BUILD_FORTRAN_CONDITIONAL_TRUE = @BUILD_FORTRAN_CONDITIONAL_TRUE@ -BUILD_HDF5_HL_CONDITIONAL_FALSE = @BUILD_HDF5_HL_CONDITIONAL_FALSE@ -BUILD_HDF5_HL_CONDITIONAL_TRUE = @BUILD_HDF5_HL_CONDITIONAL_TRUE@ -BUILD_PABLO_CONDITIONAL_FALSE = @BUILD_PABLO_CONDITIONAL_FALSE@ -BUILD_PABLO_CONDITIONAL_TRUE = @BUILD_PABLO_CONDITIONAL_TRUE@ -BUILD_PARALLEL_CONDITIONAL_FALSE = @BUILD_PARALLEL_CONDITIONAL_FALSE@ -BUILD_PARALLEL_CONDITIONAL_TRUE = @BUILD_PARALLEL_CONDITIONAL_TRUE@ -BUILD_PDB2HDF = @BUILD_PDB2HDF@ -BUILD_PDB2HDF_CONDITIONAL_FALSE = @BUILD_PDB2HDF_CONDITIONAL_FALSE@ -BUILD_PDB2HDF_CONDITIONAL_TRUE = @BUILD_PDB2HDF_CONDITIONAL_TRUE@ -BYTESEX = @BYTESEX@ -CC = @CC@ -CCDEPMODE = @CCDEPMODE@ -CC_VERSION = @CC_VERSION@ -CFLAGS = @CFLAGS@ -CONFIG_DATE = @CONFIG_DATE@ -CONFIG_MODE = @CONFIG_MODE@ -CONFIG_USER = @CONFIG_USER@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEBUG_PKG = @DEBUG_PKG@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -DYNAMIC_DIRS = @DYNAMIC_DIRS@ -ECHO = @ECHO@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -F77 = @F77@ - -# Make sure that these variables are exported to the Makefiles -F9XMODEXT = @F9XMODEXT@ -F9XMODFLAG = @F9XMODFLAG@ -F9XSUFFIXFLAG = @F9XSUFFIXFLAG@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -FCLIBS = @FCLIBS@ -FFLAGS = @FFLAGS@ -FILTERS = @FILTERS@ -FSEARCH_DIRS = @FSEARCH_DIRS@ -H5_VERSION = @H5_VERSION@ -HADDR_T = @HADDR_T@ -HDF5_INTERFACES = @HDF5_INTERFACES@ -HID_T = @HID_T@ -HL = @HL@ -HL_FOR = @HL_FOR@ -HSIZET = @HSIZET@ -HSIZE_T = @HSIZE_T@ -HSSIZE_T = @HSSIZE_T@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -LT_STATIC_EXEC = @LT_STATIC_EXEC@ -MAINT = @MAINT@ -MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ -MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ -MAKEINFO = @MAKEINFO@ -MPE = @MPE@ -OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PARALLEL = @PARALLEL@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PERL = @PERL@ -PTHREAD = @PTHREAD@ -RANLIB = @RANLIB@ -ROOT = @ROOT@ -RUNPARALLEL = @RUNPARALLEL@ -RUNSERIAL = @RUNSERIAL@ -R_INTEGER = @R_INTEGER@ -R_LARGE = @R_LARGE@ -SEARCH = @SEARCH@ -SETX = @SETX@ -SET_MAKE = @SET_MAKE@ - -# Hardcode SHELL to be /bin/sh. 
Most machines have this shell, and -# on at least one machine configure fails to detect its existence (janus). -# Also, when HDF5 is configured on one machine but run on another, -# configure's automatic SHELL detection may not work on the build machine. -SHELL = /bin/sh -SIZE_T = @SIZE_T@ -STATIC_SHARED = @STATIC_SHARED@ -STRIP = @STRIP@ -TESTPARALLEL = @TESTPARALLEL@ -TRACE_API = @TRACE_API@ -USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@ -USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@ -USE_FILTER_NBIT = @USE_FILTER_NBIT@ -USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@ -USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@ -USE_FILTER_SZIP = @USE_FILTER_SZIP@ -VERSION = @VERSION@ -ac_ct_AR = @ac_ct_AR@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_F77 = @ac_ct_F77@ -ac_ct_FC = @ac_ct_FC@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ -am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ - -# Install directories that automake doesn't know about -includedir = $(exec_prefix)/include -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -# Shell commands used in Makefiles -RM = rm -f -CP = cp - -# Some machines need a command to run executables; this is that command -# so that our tests will run. -# We use RUNTESTS instead of RUNSERIAL directly because it may be that -# some tests need to be run with a different command. Older versions -# of the makefiles used the command -# $(LIBTOOL) --mode=execute -# in some directories, for instance. -RUNTESTS = $(RUNSERIAL) - -# Libraries to link to while building -LIBHDF5 = $(top_builddir)/src/libhdf5.la -LIBH5TEST = $(top_builddir)/test/libh5test.la -LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la -LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la -LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la -LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la -LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la -LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la -docdir = $(exec_prefix)/doc - -# Scripts used to build examples -H5CC = $(bindir)/h5cc -H5CC_PP = $(bindir)/h5pcc -H5FC = $(bindir)/h5fc -H5FC_PP = $(bindir)/h5pfc - -# .chkexe and .chksh files are used to mark tests that have run successfully. -MOSTLYCLEANFILES = *.chkexe *.chksh -localdocdir = $(docdir)/hdf5/fortran - -# Public doc files (to be installed)... 
-localdoc_DATA = F90Flags.html F90UserNotes.html -all: all-am - -.SUFFIXES: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence-doc.am $(top_srcdir)/config/commence.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/html/fortran/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign doc/html/fortran/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool -uninstall-info-am: -install-localdocDATA: $(localdoc_DATA) - @$(NORMAL_INSTALL) - test -z "$(localdocdir)" || $(mkdir_p) "$(DESTDIR)$(localdocdir)" - @list='$(localdoc_DATA)'; for p in $$list; do \ - if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ - f=$(am__strip_dir) \ - echo " $(localdocDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(localdocdir)/$$f'"; \ - $(localdocDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(localdocdir)/$$f"; \ - done - -uninstall-localdocDATA: - @$(NORMAL_UNINSTALL) - @list='$(localdoc_DATA)'; for p in $$list; do \ - f=$(am__strip_dir) \ - echo " rm -f '$(DESTDIR)$(localdocdir)/$$f'"; \ - rm -f "$(DESTDIR)$(localdocdir)/$$f"; \ - done -tags: TAGS -TAGS: - -ctags: CTAGS -CTAGS: - - -distdir: $(DISTFILES) - $(mkdir_p) $(distdir)/../../../config - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(DATA) -installdirs: - for dir in "$(DESTDIR)$(localdocdir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: 
installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -test -z "$(MOSTLYCLEANFILES)" || rm -f $(MOSTLYCLEANFILES) - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-am - -clean-am: clean-generic clean-libtool mostlyclean-am - -distclean: distclean-am - -rm -f Makefile -distclean-am: clean-am distclean-generic distclean-libtool - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: install-localdocDATA - -install-exec-am: - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-generic mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-info-am uninstall-localdocDATA - -.PHONY: all all-am check check-am clean clean-generic clean-libtool \ - distclean distclean-generic distclean-libtool distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-localdocDATA install-man \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ - uninstall uninstall-am uninstall-info-am \ - uninstall-localdocDATA - - -# Ignore most rules -lib progs check test _test check-p check-s: - @echo "Nothing to be done" - -tests dep depend: - @@SETX@; for d in X $(SUBDIRS); do \ - if test $$d != X; then \ - (cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; - done - -# In docs directory, install-doc is the same as install -install-doc install-all: - $(MAKE) $(AM_MAKEFLAGS) install -uninstall-doc uninstall-all: - $(MAKE) $(AM_MAKEFLAGS) uninstall -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/doc/html/group_p1.gif b/doc/html/group_p1.gif deleted file mode 100644 index 5900446..0000000 Binary files a/doc/html/group_p1.gif and /dev/null differ diff --git a/doc/html/group_p1.obj b/doc/html/group_p1.obj deleted file mode 100644 index 5f41959..0000000 --- a/doc/html/group_p1.obj +++ /dev/null @@ -1,85 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,8,1,9,1,1,0,2,1,0,1,1,'Times-Roman',0,24,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -text('black',80,168,'Courier',0,17,1,0,0,1,7,14,30,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',80,184,'Courier',0,17,1,0,0,1,7,14,34,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',80,200,'Courier',0,17,1,0,0,1,7,14,36,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',80,216,'Courier',0,17,1,0,0,1,21,14,38,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Foo"]). -text('black',80,232,'Courier',0,17,1,0,0,1,7,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). 
-text('black',80,248,'Courier',0,17,1,0,0,1,7,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -rcbox('black',64,152,128,280,0,1,1,0,16,49,0,0,0,0,'1',[ -]). -text('black',208,152,'Courier',0,17,1,0,0,1,7,14,52,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',80,152,'Courier',0,17,1,0,0,1,7,14,56,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',208,168,'Courier',0,17,1,0,0,1,7,14,58,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',208,184,'Courier',0,17,1,0,0,1,21,14,60,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Bar"]). -text('black',208,200,'Courier',0,17,1,0,0,1,7,14,62,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',208,216,'Courier',0,17,1,0,0,1,7,14,64,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',208,232,'Courier',0,17,1,0,0,1,7,14,68,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',208,248,'Courier',0,17,1,0,0,1,7,14,72,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -rcbox('black',192,152,256,280,0,1,1,0,16,74,0,0,0,0,'1',[ -]). -text('black',336,152,'Courier',0,17,1,0,0,1,7,14,75,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',336,168,'Courier',0,17,1,0,0,1,7,14,77,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',336,184,'Courier',0,17,1,0,0,1,7,14,81,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',336,200,'Courier',0,17,1,0,0,1,7,14,88,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',336,216,'Courier',0,17,1,0,0,1,7,14,92,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',336,232,'Courier',0,17,1,0,0,1,7,14,94,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',336,248,'Courier',0,17,1,0,0,1,21,14,96,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Baz"]). -rcbox('black',320,152,384,280,0,1,1,0,16,98,0,0,0,0,'1',[ -]). -text('black',224,360,'NewCenturySchlbk-Roman',0,17,2,1,0,1,42,30,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Object", - "Header"]). -rcbox('black',192,344,256,408,0,1,1,0,16,101,0,0,0,0,'1',[ -]). -poly('black',4,[ - 112,224,136,216,152,184,192,168],1,1,1,102,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',4,[ - 232,192,272,184,288,168,320,160],1,1,1,107,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',4,[ - 368,256,416,272,392,336,256,352],1,1,1,110,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -text('black',96,128,'Times-Roman',0,17,1,1,0,1,40,15,120,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group 1"]). -text('black',224,128,'Times-Roman',0,17,1,1,0,1,40,15,126,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group 2"]). -text('black',352,128,'Times-Roman',0,17,1,1,0,1,40,15,130,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group 3"]). -text('black',224,320,'Times-Roman',0,17,1,1,0,1,64,15,134,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Some Object"]). -text('black',224,80,'Times-Roman',0,24,1,1,0,1,258,28,138,0,22,6,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "The name \"/Foo/Bar/Baz\""]). -box('black',40,64,448,432,0,1,1,140,0,0,0,0,0,'1',[ -]). diff --git a/doc/html/group_p2.gif b/doc/html/group_p2.gif deleted file mode 100644 index a2d12a0..0000000 Binary files a/doc/html/group_p2.gif and /dev/null differ diff --git a/doc/html/group_p2.obj b/doc/html/group_p2.obj deleted file mode 100644 index cb91258..0000000 --- a/doc/html/group_p2.obj +++ /dev/null @@ -1,57 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,8,1,9,1,1,0,2,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). 
-text('black',144,128,'Courier',0,17,1,0,0,1,7,14,26,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,144,'Courier',0,17,1,0,0,1,7,14,30,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,160,'Courier',0,17,1,0,0,1,21,14,34,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Foo"]). -text('black',144,176,'Courier',0,17,1,0,0,1,7,14,36,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,192,'Courier',0,17,1,0,0,1,7,14,38,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -rcbox('black',128,128,192,256,0,1,1,0,16,40,0,0,0,0,'1',[ -]). -text('black',144,320,'Courier',0,17,1,0,0,1,7,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,336,'Courier',0,17,1,0,0,1,7,14,45,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,352,'Courier',0,17,1,0,0,1,21,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Bar"]). -text('black',144,368,'Courier',0,17,1,0,0,1,7,14,49,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,384,'Courier',0,17,1,0,0,1,7,14,51,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -rcbox('black',128,320,192,448,0,1,1,0,16,53,0,0,0,0,'1',[ -]). -text('black',160,96,'NewCenturySchlbk-Roman',0,17,1,1,0,1,46,15,64,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group 1"]). -text('black',160,288,'NewCenturySchlbk-Roman',0,17,1,1,0,1,46,15,68,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group 2"]). -text('black',352,224,'NewCenturySchlbk-Roman',0,17,2,1,0,1,35,30,70,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Some", - "Object"]). -rcbox('black',320,256,384,384,0,1,1,0,16,72,0,0,0,0,'1',[ -]). -poly('black',4,[ - 176,168,224,192,264,240,320,264],1,1,1,73,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',4,[ - 176,360,232,344,272,288,320,272],1,1,1,74,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -text('black',264,40,'Helvetica',0,24,1,1,0,1,206,29,93,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Hard Link Example"]). -box('black',88,24,424,496,0,1,1,95,0,0,0,0,0,'1',[ -]). -text('black',240,192,'Courier',0,17,1,0,0,1,63,14,129,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "hard link"]). -text('black',248,336,'Courier',0,17,1,0,0,1,63,14,131,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "hard link"]). diff --git a/doc/html/group_p3.gif b/doc/html/group_p3.gif deleted file mode 100644 index 85346de..0000000 Binary files a/doc/html/group_p3.gif and /dev/null differ diff --git a/doc/html/group_p3.obj b/doc/html/group_p3.obj deleted file mode 100644 index ad93444..0000000 --- a/doc/html/group_p3.obj +++ /dev/null @@ -1,59 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,8,1,9,1,1,0,2,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -text('black',144,128,'Courier',0,17,1,0,0,1,7,14,26,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,144,'Courier',0,17,1,0,0,1,7,14,30,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,160,'Courier',0,17,1,0,0,1,21,14,34,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Foo"]). -text('black',144,176,'Courier',0,17,1,0,0,1,7,14,36,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,192,'Courier',0,17,1,0,0,1,7,14,38,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -rcbox('black',128,128,192,256,0,1,1,0,16,40,0,0,0,0,'1',[ -]). -text('black',144,320,'Courier',0,17,1,0,0,1,7,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,336,'Courier',0,17,1,0,0,1,7,14,45,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). 
-text('black',144,352,'Courier',0,17,1,0,0,1,21,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Bar"]). -text('black',144,368,'Courier',0,17,1,0,0,1,7,14,49,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -text('black',144,384,'Courier',0,17,1,0,0,1,7,14,51,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "."]). -rcbox('black',128,320,192,448,0,1,1,0,16,53,0,0,0,0,'1',[ -]). -text('black',160,96,'NewCenturySchlbk-Roman',0,17,1,1,0,1,46,15,64,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group 1"]). -text('black',160,288,'NewCenturySchlbk-Roman',0,17,1,1,0,1,46,15,68,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group 2"]). -text('black',352,96,'NewCenturySchlbk-Roman',0,17,2,1,0,1,35,30,70,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Some", - "Object"]). -rcbox('black',320,128,384,256,0,1,1,0,16,72,0,0,0,0,'1',[ -]). -text('black',264,40,'Helvetica',0,24,1,1,0,1,197,29,93,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Soft Link Example"]). -box('black',88,24,424,496,0,1,1,95,0,0,0,0,0,'1',[ -]). -text('black',320,352,'Courier',0,17,1,0,0,1,35,14,105,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "\"Foo\""]). -poly('black',4,[ - 176,168,232,160,264,144,320,136],1,1,1,111,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 176,360,312,360],1,1,1,116,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -text('black',240,160,'Courier',0,17,1,0,0,1,63,14,119,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "hard link"]). -text('black',216,368,'Courier',0,17,1,0,0,1,63,14,121,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "soft link"]). diff --git a/doc/html/h5s.examples b/doc/html/h5s.examples deleted file mode 100644 index 688382f..0000000 --- a/doc/html/h5s.examples +++ /dev/null @@ -1,347 +0,0 @@ -Example 1: Create a simple fixed size 3-D dataspace in memory and on disk and - copy the entire dataset to disk. - -{ - hid_t file; /* File ID */ - hid_t dataset; /* Dataset ID */ - hid_t mem_space, file_space; /* Dataspaces for memory and the file */ - uint8 *buf; /* Buffer for data */ - hsize_t curr_dims[3]={3,4,5}; /* Dimensions of the dataset */ - - /* Create file */ - file = H5Fcreate("example1.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for dataset in the file */ - /* Selection for dataspace defaults to entire space */ - file_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the dataset's dataspace */ - H5Sset_extent_simple(file_space,3,curr_dims,curr_dims); - - /* Create the dataspace for the dataset in memory */ - /* Selection for dataspace defaults to entire space */ - mem_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the memory dataspace */ - H5Sset_extent_simple(mem_space,3,curr_dims,curr_dims); - - /* Create the dataset on disk */ - dataset=H5Dcreate(file,"Dataset",H5T_NATIVE_UINT8,file_space,H5P_DEFAULT); - - /* Write the dataset to the file */ - H5Dwrite(dataset,H5T_NATIVE_UINT8,mem_space,file_space,H5P_DEFAULT,buf); - - /* Close dataspaces */ - H5Sclose(mem_space); - H5Sclose(file_space); - - /* Close dataset & file */ - H5Dclose(dataset); - H5Fclose(file); -} - - -Example 2: Create a simple fixed size 3-D dataspace in memory and on disk and - copy a hyperslab to disk. The hyperslab blocks are packed and - contiguous in memory, but are scattered when written to the dataset - on disk. 
- -{ - hid_t file; /* File ID */ - hid_t dataset; /* Dataset ID */ - hid_t mem_space, file_space; /* Dataspaces for memory and the file */ - uint8 *buf; /* Buffer for data */ - hsize_t start[3]={3,4,5}; /* Start of hyperslab */ - hsize_t stride[3]={1,2,2}; /* Stride for hyperslab */ - hsize_t count[3]={3,3,3}; /* Hyperslab block count in each dimension */ - hsize_t block[3]={2,2,2}; /* Hyperslab block size in each dimension */ - hsize_t curr_dims[3]={13,14,15}; /* Dimensions of the dataset */ - - /* Create file */ - file = H5Fcreate("example2.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for dataset in the file */ - /* Selection for dataspace defaults to entire space */ - file_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the dataset's dataspace */ - H5Sset_extent_simple(file_space,3,curr_dims,curr_dims); - - /* Set the hyperslab selection for a file dataspace */ - H5Sselect_hyperslab(file_space,H5S_SELECT_SET,start,stride,count,block); - - /* Create the dataspace for the dataset in memory */ - /* Selection for dataspace defaults to entire space */ - mem_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the memory dataspace */ - /* Compute the memory dimensions based on the hyperslab blocks to write */ - for(i=0; i<3; i++) - curr_dims[i]=count[i]*block[i]; - H5Sset_extent_simple(mem_space,3,curr_dims,curr_dims); - - /* Create the dataset on disk */ - dataset=H5Dcreate(file,"Dataset",H5T_NATIVE_UINT8,file_space,H5P_DEFAULT); - - /* Write the hyperslab to the file */ - H5Dwrite(dataset,H5T_NATIVE_UINT8,mem_space,file_space,H5P_DEFAULT,buf); - - /* Close dataspaces */ - H5Sclose(mem_space); - H5Sclose(file_space); - - /* Close dataset & file */ - H5Dclose(dataset); - H5Fclose(file); -} - - -Example 3: Create a simple fixed size 3-D dataspace in memory and on disk and - copy a specific selection of points (with a particular order) to - disk. The memory and file dataspaces are different sizes, but the number - of points selected are the same. - -{ - hid_t file; /* File ID */ - hid_t dataset; /* Dataset ID */ - hid_t mem_space, file_space; /* Dataspaces for memory and the file */ - uint8 *buf; /* Buffer for data */ - hsize_t elements[5][3]; /* Dataspace elements selected */ - hsize_t curr_dims[3]={13,14,15}; /* Dimensions of the dataset */ - - /* Create file */ - file = H5Fcreate("example3.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for dataset in the file */ - /* Selection for dataspace defaults to entire space */ - file_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the dataset's dataspace */ - H5Sset_extent_simple(file_space,3,curr_dims,curr_dims); - - /* Set the elements for the selection in the file dataspace */ - elements[0]={0,2,4}; /* Yes, I know this won't compile.. 
:-) */ - elements[1]={3,4,1}; - elements[2]={9,8,3}; - elements[3]={7,2,0}; - elements[4]={6,5,8}; - H5Sselect_elements(file_space,H5S_SELECT_SET,5,elements); - - /* Create the dataspace for the dataset in memory */ - /* Selection for dataspace defaults to entire space */ - mem_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the memory dataspace */ - curr_dims={23,15,18}; /* This won't compile either :-) */ - H5Sset_extent_simple(mem_space,3,curr_dims,curr_dims); - - /* Set the elements for the selection in the file dataspace */ - elements[0]={9,2,1}; - elements[1]={13,1,12}; - elements[2]={4,1,7}; - elements[3]={0,12,0}; - elements[4]={20,10,17}; - H5Sselect_elements(mem_space,H5S_SELECT_SET,5,elements); - - /* Create the dataset on disk */ - dataset=H5Dcreate(file,"Dataset",H5T_NATIVE_UINT8,file_space,H5P_DEFAULT); - - /* Write the hyperslab to the file */ - H5Dwrite(dataset,H5T_NATIVE_UINT8,mem_space,file_space,H5P_DEFAULT,buf); - - /* Close dataspaces */ - H5Sclose(mem_space); - H5Sclose(file_space); - - /* Close dataset & file */ - H5Dclose(dataset); - H5Fclose(file); -} - - -Example 4: Create a simple fixed size 3-D dataspace in memory and on disk and - build up selection hyperslab selections to copy from memory to disk. The - selection is the same for both dataspaces, but a different offset is used, - to illustrate the selection offsets. - -{ - hid_t file; /* File ID */ - hid_t dataset; /* Dataset ID */ - hid_t mem_space, file_space; /* Dataspaces for memory and the file */ - uint8 *buf; /* Buffer for data */ - hsize_t start[3]; /* Start of hyperslab */ - hsize_t stride[3]; /* Stride for hyperslab */ - hsize_t count[3]; /* Hyperslab block count in each dimension */ - hsize_t block[3]; /* Hyperslab block size in each dimension */ - hssize_t offset[3]; /* Selection offset */ - hsize_t curr_dims[3]={13,14,15}; /* Dimensions of the dataset */ - - /* Create file */ - file = H5Fcreate("example4.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for dataset in the file */ - /* Selection for dataspace defaults to entire space */ - file_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the dataset's dataspace */ - H5Sset_extent_simple(file_space,3,curr_dims,curr_dims); - - /* Build up the selection with a series of hyperslab selections */ - start={0,2,4}; /* Again, this won't compile.. :-) */ - stride={1,1,1}; - count={6,5,8}; - block={1,1,1}; - - /* Set the first selection, union the rest in */ - H5Sselect_hyperslab(file_space,H5S_SELECT_SET,start,stride,count,block); - - /* initialize the second hyperslab */ - start={10,9,1}; /* Again, this won't compile.. :-) */ - stride={1,1,1}; - count={2,3,10}; - block={1,1,1}; - - /* Union the second hyperslab into the file dataspace's selection */ - H5Sselect_hyperslab(file_space,H5S_SELECT_UNION,start,stride,count,block); - - /* initialize the third hyperslab */ - start={3,10,5}; /* Again, this won't compile.. 
:-) */ - stride={1,1,1}; - count={8,2,6}; - block={1,1,1}; - - /* Union the final hyperslab into the file dataspace's selection */ - H5Sselect_hyperslab(file_space,H5S_SELECT_UNION,start,stride,count,block); - - /* Create the dataspace for the dataset in memory */ - /* Selection for dataspace defaults to entire space */ - mem_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the memory dataspace */ - curr_dims={23,15,18}; /* This won't compile either :-) */ - H5Sset_extent_simple(mem_space,3,curr_dims,curr_dims); - - /* Copy the selection from the file dataspace */ - H5Sselect_op(mem_space,H5S_SELECT_COPY,file_space); - - /* Adjust the offset of the selection in the memory dataspace */ - offset={1,1,1}; - H5Soffset_simple(mem_space,offset); - - /* Create the dataset on disk */ - dataset=H5Dcreate(file,"Dataset",H5T_NATIVE_UINT8,file_space,H5P_DEFAULT); - - /* Write the hyperslab to the file */ - H5Dwrite(dataset,H5T_NATIVE_UINT8,mem_space,file_space,H5P_DEFAULT,buf); - - /* Close dataspaces */ - H5Sclose(mem_space); - H5Sclose(file_space); - - /* Close dataset & file */ - H5Dclose(dataset); - H5Fclose(file); -} - - -Example 5: Same as example 1 (create a simple fixed size 3-D dataspace in memory and on disk and - copy the entire dataset to disk), except that the selection order is changed - for the memory dataspace, to change between FORTRAN and C array ordering. - -{ - hid_t file; /* File ID */ - hid_t dataset; /* Dataset ID */ - hid_t mem_space, file_space; /* Dataspaces for memory and the file */ - uint8 *buf; /* Buffer for data */ - hsize_t order[3]; /* Dimension ordering for selection */ - hsize_t curr_dims[3]={3,4,5}; /* Dimensions of the dataset */ - - /* Create file */ - file = H5Fcreate("example5.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace for dataset in the file */ - /* Selection for dataspace defaults to entire space and C array order */ - file_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the dataset's dataspace */ - H5Sset_extent_simple(file_space,3,curr_dims,curr_dims); - - /* Create the dataspace for the dataset in memory */ - /* Selection for dataspace defaults to entire space and C array order */ - mem_space=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of the memory dataspace */ - H5Sset_extent_simple(mem_space,3,curr_dims,curr_dims); - - /* Change selection ordering to FORTRAN order for memory dataspace */ - order={0,1,2}; - H5Sselect_order(mem_space,order); - - /* Create the dataset on disk */ - dataset=H5Dcreate(file,"Dataset",H5T_NATIVE_UINT8,file_space,H5P_DEFAULT); - - /* Write the dataset to the file */ - H5Dwrite(dataset,H5T_NATIVE_UINT8,mem_space,file_space,H5P_DEFAULT,buf); - - /* Close dataspaces */ - H5Sclose(mem_space); - H5Sclose(file_space); - - /* Close dataset & file */ - H5Dclose(dataset); - H5Fclose(file); -} - - -Example 6: Create a stored dataspace on disk and use the H5Ssubspace function - create a dataspace located within that space. 
- -{ - hid_t file; /* File ID */ - hid_t space1, space2; /* Dataspace IDs */ - hsize_t start[3]; /* Start of hyperslab */ - hsize_t count[3]; /* Hyperslab block count in each dimension */ - hsize_t curr_dims[3]={13,14,15};/* Dimensions of the dataset */ - - /* Create file */ - file = H5Fcreate("example6.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - - /* Create dataspace #1 */ - space1=H5Screate(H5S_SIMPLE); - - /* Set the extent & type of dataspace #1 */ - H5Sset_extent_simple(space1,3,curr_dims,curr_dims); - - /* Store dataspace #1 on disk */ - H5Scommit(file,"/Dataspaces/Dataspace #1",space1); - - /* Select a contiguous hyperslab in dataspace #1 to create dataspace #2 with */ - start={0,2,4}; - count={6,5,8}; - - /* - * Use stride and block set to NULL to get contiguous, single element sized - * hyperslab. The stride and block parameters could also be set to all - * 1's, but this is simpler and easier. - */ - H5Sselect_hyperslab(space1,H5S_SELECT_SET,start,NULL,count,NULL); - - /* Create dataspace #2 as a dataspace located within dataspace #1 */ - space2=H5Ssubspace(space1); - - /* Store dataspace #2 on disk also */ - H5Scommit(file,"/Dataspaces/Dataspace #2",space2); - - /* - * space1 & space2 can be used to create datasets, etc. Any datasets - * created with space2 can have their dataspace queried to find the parent - * dataspace and the location within the parent dataspace - */ - - /* Close dataspaces */ - H5Sclose(space1); - H5Sclose(space2); - - /* Close file */ - H5Fclose(file); -} diff --git a/doc/html/hdf2.jpg b/doc/html/hdf2.jpg deleted file mode 100644 index 92b53c9..0000000 Binary files a/doc/html/hdf2.jpg and /dev/null differ diff --git a/doc/html/heap.txt b/doc/html/heap.txt deleted file mode 100644 index 6b4c058..0000000 --- a/doc/html/heap.txt +++ /dev/null @@ -1,72 +0,0 @@ - HEAP MANAGEMENT IN HDF5 - ------------------------ - -Heap functions are in the H5H package. - - -off_t -H5H_new (hdf5_file_t *f, size_t size_hint, size_t realloc_hint); - - Creates a new heap in the specified file which can efficiently - store at least SIZE_HINT bytes. The heap can store more than - that, but doing so may cause the heap to become less efficient - (for instance, a heap implemented as a B-tree might become - discontigous). The REALLOC_HINT is the minimum number of bytes - by which the heap will grow when it must be resized. The hints - may be zero in which case reasonable (but probably not - optimal) values will be chosen. - - The return value is the address of the new heap relative to - the beginning of the file boot block. - -off_t -H5H_insert (hdf5_file_t *f, off_t addr, size_t size, const void *buf); - - Copies SIZE bytes of data from BUF into the heap whose address - is ADDR in file F. BUF must be the _entire_ heap object. The - return value is the byte offset of the new data in the heap. - -void * -H5H_read (hdf5_file_t *f, off_t addr, off_t offset, size_t size, void *buf); - - Copies SIZE bytes of data from the heap whose address is ADDR - in file F into BUF and then returns the address of BUF. If - BUF is the null pointer then a new buffer will be malloc'd by - this function and its address is returned. - - Returns buffer address or null. - -const void * -H5H_peek (hdf5_file_t *f, off_t addr, off_t offset) - - A more efficient version of H5H_read that returns a pointer - directly into the cache; the data is not copied from the cache - to a buffer. The pointer is valid until the next call to an - H5AC function directly or indirectly. - - Returns a pointer or null. 
Do not free the pointer. - -void * -H5H_write (hdf5_file_t *f, off_t addr, off_t offset, size_t size, - const void *buf); - - Modifies (part of) an object in the heap at address ADDR of - file F by copying SIZE bytes from the beginning of BUF to the - file. OFFSET is the address withing the heap where the output - is to occur. - - This function can fail if the combination of OFFSET and SIZE - would write over a boundary between two heap objects. - -herr_t -H5H_remove (hdf5_file_t *f, off_t addr, off_t offset, size_t size); - - Removes an object or part of an object which begins at byte - OFFSET within a heap whose address is ADDR in file F. SIZE - bytes are returned to the free list. Removing the middle of - an object has the side effect that one object is now split - into two objects. - - Returns success or failure. - - diff --git a/doc/html/index.html b/doc/html/index.html deleted file mode 100644 index 3e37f59..0000000 --- a/doc/html/index.html +++ /dev/null @@ -1,308 +0,0 @@ - - - - - - - - HDF5 - The Next Generation of the HDF library & tools - - - - - - - - - -
      -
      - - -
      - - HDF Logo - -

      HDF5 - A New Generation of HDF -
      The Hierarchical Data Format


      -
      -
      - - - -
      - - - - - - - - - - - - - - - - -
      -

      HDF Links at NCSA

      -
      -
      HDF Help Desk -
        -
      • Email to HDF Technical Support -
      -

      -

      HDF Home Page -

      -

      HDF Newsletters -
        -
      • News about HDF and HDF5 -
      -

      -

      HDF5 Downloads -
        -
      • HDF5 download site -
      -

      -

      HDF5 Overview, etc. -
        -
      • Overview of HDF5 library and development effort (slide show) -
      • Other HDF5-related papers and presentations -
      -

      -

      Parallel HDF5 -
        -
      • HDF5 in parallel computing environments -
      • Installation, tutorial, Q&A, design notes -
      -

      -

      XML and HDF5 -
        -
      • XML tools and standard XML DTD for HDF5 -
      -

      -

      HDF5 Doc Development -
        -
      • Snapshots of future releases -
      • Related document sets not part of standard release -
      • Errata, bugfixes, and updates for this release (if available) -
      - -

      - -

      -
      - -
      -

      HDF5 User Documentation
      Release 1.7
      - (unreleased development branch)

      - -
      -
      -
      An Introduction to HDF5 -
        -
      • An overview of design goals behind the HDF5 library and file format -
      • An introduction to HDF5 programming -
      -
      HDF5 Tutorial -
        -
      • A tutorial introduction to HDF5 -
      • Served from the HDF5 website at NCSA -
      -
      HDF5 User's Guide -
        -
      • A new user's guide, first published with HDF5 Release 2.0 -
      • The HDF5 Release 1.4.5 User's Guide - remains available, though it has not been updated - for the current release -
      -
      HDF5 Reference Manuals - -
      HDF5 and “Foreign Languages” - -
      HDF5 Glossary -
        -
      • A glossary of terms as they are used in HDF5 -
      -
      HDF5 Application Developer's Guide - -
      - -
      -

      HDF5 Tools

      -
      -
      HDF5 Tools -
        -
      • HDF5 tools, including Java-based tools -
      • HDFView, h5dump, - h5ls, h5toh4, etc. -
      -
      -
      -

      HDF5 Library Development Documentation

      -
      -
      HDF5 File Format Specification -
        -
      • The complete specification of the HDF5 file format -
      -
      HDF5 Technical Notes -
        -
      • Technical notes for HDF5 library and driver developers -
      - -
      -
      -
      (Internet links)
      -
      - -
      (Generally local links)
      -
      -
      - - - - -


      - - - NCSA Logo
      - The National Center for Supercomputing Applications

      - University of Illinois - at Urbana-Champaign - -

      -HDF Help Desk -
      - -Last modified: 17 May 2005 - - -
      -Describes HDF5 Release 1.7, the unreleased development branch; -working toward HDF5 Release 1.8.0. - -

      -Copyright by the Board of Trustees of the University of Illinois. -
      -All rights reserved. -See full copyright notice. - -
      - - - diff --git a/doc/html/move.html b/doc/html/move.html deleted file mode 100644 index ec87d11..0000000 --- a/doc/html/move.html +++ /dev/null @@ -1,66 +0,0 @@ - - - - How to Relocate a File Data Structure - - - -

      How to Relocate a File Data Structure

      - -

Since file data structures can be cached in memory by the H5AC package, it becomes problematic to move such a data structure in the file. One cannot just copy a portion of the file from one location to another because:

        -
      1. the file might not contain the latest information, and
2. the H5AC package might not realize that the object's address has changed and attempt to write the object to disk at the old address.
      - -

      Here's a correct method to move data from one location to - another. The example code assumes that one is moving a B-link - tree node from old_addr to new_addr. - -

        -
      1. Make sure the disk is up-to-date with respect to the - cache. There is no need to remove the item from the cache, - hence the final argument to H5AC_flush is - FALSE. -

        - - H5AC_flush (f, H5AC_BT, old_addr, FALSE);
        -
        -
        -
2. Read the data from the old address and write it to the new address.

        - - H5F_block_read (f, old_addr, size, buf);
        - H5F_block_write (f, new_addr, size, buf);
        -
        -
        -
3. Notify the cache that the address of the object changed.

        - - H5AC_rename (f, H5AC_BT, old_addr, new_addr);
        -
        -
        -
      - - - -
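Putting the three steps together, a helper might look like the sketch below. The prototypes follow the snippets above and the heap notes earlier in this document set (hdf5_file_t, off_t addresses); the real internal signatures have changed over time, so treat this as an illustration rather than current library code.

    static herr_t
    move_btree_node(hdf5_file_t *f, off_t old_addr, off_t new_addr,
                    size_t size, void *buf)
    {
        /* 1. make the disk copy current, but keep the object cached */
        if (H5AC_flush(f, H5AC_BT, old_addr, FALSE) < 0) return -1;

        /* 2. copy the raw bytes from the old location to the new one */
        if (H5F_block_read(f, old_addr, size, buf) < 0) return -1;
        if (H5F_block_write(f, new_addr, size, buf) < 0) return -1;

        /* 3. tell the cache that the object's address changed */
        return H5AC_rename(f, H5AC_BT, old_addr, new_addr);
    }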
      -
      Robb Matzke
      - - -Last modified: Mon Jul 14 15:38:29 EST - - - diff --git a/doc/html/ph5design.html b/doc/html/ph5design.html deleted file mode 100644 index 1280052..0000000 --- a/doc/html/ph5design.html +++ /dev/null @@ -1,77 +0,0 @@ - - - - -new - - - - -

      Parallel HDF5 Design

      -

       

      -

      1. Design Overview

      -

In this section, I first describe the functional requirements of the Parallel HDF5 (PHDF5) software and the assumed system requirements. Section 2 describes the programming model of the PHDF5 interface. Section 3 shows an example PHDF5 program.

      -

1.1. Functional requirements

      - -
        -
      • An API to support parallel file access for HDF5 files in a message passing environment.
      • -
• Fast parallel I/O to large datasets through a standard parallel I/O interface.
      • -
      • Processes are required to do collective API calls only when structural changes are needed for the HDF5 file.
      • -
      • Each process may do independent I/O requests to different datasets in the same or different HDF5 files.
      • -
• Supports collective I/O requests for datasets (to be included in the next version).
      • -
• Minimize deviation from the HDF5 interface.
      • -
      - -

      1.2. System requirements

      - -
        -
• A C language interface is the initial requirement; a Fortran77 interface will be added later.
      • -
      • Use Message Passing Interface (MPI) for interprocess communication.
      • -
      • Use MPI-IO calls for parallel file accesses.
      • -
      • Initial platforms—IBM SP2, Intel TFLOPS and SGI Origin 2000.
      - -

      2. Programming Model

      -

HDF5 uses an optional access template object to control the file access mechanism. The general model for accessing an HDF5 file in parallel consists of the following steps:

      - -
        -
      • Setup access template
      • -
      • File open
      • -
      • Dataset open
      • -
      • Dataset data access (zero or more)
      • -
      • Dataset close
      • -
      • File close
      - -

      2.1. Setup access template

      -

Each process of the MPI communicator creates an access template and sets it up with MPI parallel access information (communicator, info object, access mode).
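A minimal sketch of this step; it uses the same H5Pcreate and H5Pset_fapl_mpio calls as the example program included later in this document set, and the names acc_tpl, comm and info are illustrative:

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
    hid_t    acc_tpl;

    /* every process creates a file-access template ...          */
    acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
    /* ... and stores the MPI communicator and info object in it */
    H5Pset_fapl_mpio(acc_tpl, comm, info);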

      -

2.2. File open

      -

All processes of the MPI communicator open an HDF5 file by a collective call (H5Fcreate or H5Fopen) with the access template.
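For example (a sketch; the file name is illustrative and acc_tpl is the template from the previous step):

    hid_t fid;

    /* collective file creation; use H5Fopen(...) instead for an existing file */
    fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
    /* the access template may be released once the file is open */
    H5Pclose(acc_tpl);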

      -

2.3. Dataset open

      -

All processes of the MPI communicator open a dataset by a collective call (H5Dcreate or H5Dopen). This version supports only collective dataset opens. A future version may support opening a dataset by a subset of the processes that have opened the file.
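For example (a sketch using the same H5Dcreate/H5Dopen signatures as the example program later in this document set; the dimensions and names are illustrative):

    hsize_t dims[2] = {24, 24};
    hid_t   sid, dset;

    sid  = H5Screate_simple(2, dims, NULL);
    dset = H5Dcreate(fid, "Data1", H5T_NATIVE_INT, sid, H5P_DEFAULT);
    /* or, for an existing dataset:  dset = H5Dopen(fid, "Data1"); */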

      -

2.4. Dataset access

      -

2.4.1. Independent dataset access

      -

Each process may make an arbitrary number of independent data I/O accesses by independent calls (H5Dread or H5Dwrite) to the dataset, with the transfer template set for independent access. (The default transfer mode is independent transfer.) If the dataset has an unlimited dimension and an H5Dwrite is writing data beyond the current dimension size of the dataset, all processes that have opened the dataset must make a collective call (H5Dallocate) to allocate more space for the dataset BEFORE the independent H5Dwrite call.
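A sketch of an independent write, in which each process selects its own hyperslab of the file dataspace and writes it with the default (independent) transfer mode. The slab arithmetic follows the example program later in this document set and assumes the first dimension divides evenly among the processes; dims, dset, buf, mpi_rank and mpi_size are placeholders:

    hsize_t start[2], count[2], stride[2] = {1, 1};
    hid_t   file_space, mem_space;

    start[0] = mpi_rank * (dims[0] / mpi_size);   start[1] = 0;
    count[0] = dims[0] / mpi_size;                count[1] = dims[1];

    file_space = H5Dget_space(dset);
    H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, NULL);
    mem_space  = H5Screate_simple(2, count, NULL);

    /* H5P_DEFAULT selects the independent transfer mode */
    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, H5P_DEFAULT, buf);

    H5Sclose(mem_space);
    H5Sclose(file_space);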

      -

2.4.2. Collective dataset access

      -

All processes that have opened the dataset may perform collective data I/O by collective calls (H5Dread or H5Dwrite) to the dataset, with the transfer template set for collective access. Pre-allocation (H5Dallocate) is not needed for unlimited-dimension datasets since the H5Dallocate call, if needed, is made internally by the collective data access call.
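A sketch of the collective case; it differs from the independent sketch above only in the dataset transfer property list (xfer_plist is an illustrative name, and the calls mirror the example program later in this document set):

    hid_t xfer_plist;

    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);

    /* every process that has opened the dataset participates in this call */
    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, xfer_plist, buf);

    H5Pclose(xfer_plist);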

      -

2.4.3. Dataset attributes access

      -

Changes to attributes can occur only at the "main process" (process 0). Read-only access to attributes can occur independently in each process that has opened the dataset. (API to be defined later.)

      -

2.5. Dataset close

      -

      All processes that have opened the dataset must close the dataset by a collective call (H5Dclose).

      -

2.6. File close

      -

      All processes that have opened the file must close the file by a collective call (H5Fclose).

      -

      3. Parallel HDF5 Example

      -
      -
      -Example code
      -
      -


      -

      Send comments to
      -hdfparallel@ncsa.uiuc.edu

      -
      Last Modified: Feb 16, 1998
      - diff --git a/doc/html/ph5example.c b/doc/html/ph5example.c deleted file mode 100644 index a69f221..0000000 --- a/doc/html/ph5example.c +++ /dev/null @@ -1,1018 +0,0 @@ -/* - * Example of using the parallel HDF5 library to access datasets. - * Last revised: April 24, 2001. - * - * This program contains two parts. In the first part, the mpi processes - * collectively create a new parallel HDF5 file and create two fixed - * dimension datasets in it. Then each process writes a hyperslab into - * each dataset in an independent mode. All processes collectively - * close the datasets and the file. - * In the second part, the processes collectively open the created file - * and the two datasets in it. Then each process reads a hyperslab from - * each dataset in an independent mode and prints them out. - * All processes collectively close the datasets and the file. - */ - -#include -#include - -#ifdef H5_HAVE_PARALLEL -/* Temporary source code */ -#define FAIL -1 -/* temporary code end */ - -/* Define some handy debugging shorthands, routines, ... */ -/* debugging tools */ -#define MESG(x)\ - if (verbose) printf("%s\n", x);\ - -#define MPI_BANNER(mesg)\ - {printf("--------------------------------\n");\ - printf("Proc %d: ", mpi_rank); \ - printf("*** %s\n", mesg);\ - printf("--------------------------------\n");} - -#define SYNC(comm)\ - {MPI_BANNER("doing a SYNC"); MPI_Barrier(comm); MPI_BANNER("SYNC DONE");} -/* End of Define some handy debugging shorthands, routines, ... */ - -/* Constants definitions */ -/* 24 is a multiple of 2, 3, 4, 6, 8, 12. Neat for parallel tests. */ -#define SPACE1_DIM1 24 -#define SPACE1_DIM2 24 -#define SPACE1_RANK 2 -#define DATASETNAME1 "Data1" -#define DATASETNAME2 "Data2" -#define DATASETNAME3 "Data3" -/* hyperslab layout styles */ -#define BYROW 1 /* divide into slabs of rows */ -#define BYCOL 2 /* divide into blocks of columns */ - - -/* dataset data type. Int's can be easily octo dumped. */ -typedef int DATATYPE; - -/* global variables */ -int nerrors = 0; /* errors count */ - -int mpi_size, mpi_rank; /* mpi variables */ - -/* option flags */ -int verbose = 0; /* verbose, default as no. */ -int doread=1; /* read test */ -int dowrite=1; /* write test */ - - - -/* - * Setup the dimensions of the hyperslab. - * Two modes--by rows or by columns. - * Assume dimension rank is 2. - */ -void -slab_set(hsize_t start[], hsize_t count[], hsize_t stride[], int mode) -{ - switch (mode){ - case BYROW: - /* Each process takes a slabs of rows. */ - stride[0] = 1; - stride[1] = 1; - count[0] = SPACE1_DIM1/mpi_size; - count[1] = SPACE1_DIM2; - start[0] = mpi_rank*count[0]; - start[1] = 0; - break; - case BYCOL: - /* Each process takes a block of columns. */ - stride[0] = 1; - stride[1] = 1; - count[0] = SPACE1_DIM1; - count[1] = SPACE1_DIM2/mpi_size; - start[0] = 0; - start[1] = mpi_rank*count[1]; - break; - default: - /* Unknown mode. Set it to cover the whole dataset. */ - printf("unknown slab_set mode (%d)\n", mode); - stride[0] = 1; - stride[1] = 1; - count[0] = SPACE1_DIM1; - count[1] = SPACE1_DIM2; - start[0] = 0; - start[1] = 0; - break; - } -} - - -/* - * Fill the dataset with trivial data for testing. - * Assume dimension rank is 2 and data is stored contiguous. 
- */ -void -dataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE * dataset) -{ - DATATYPE *dataptr = dataset; - int i, j; - - /* put some trivial data in the data_array */ - for (i=0; i < count[0]; i++){ - for (j=0; j < count[1]; j++){ - *dataptr++ = (i*stride[0]+start[0])*100 + (j*stride[1]+start[1]+1); - } - } -} - - -/* - * Print the content of the dataset. - */ -void dataset_print(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE * dataset) -{ - DATATYPE *dataptr = dataset; - int i, j; - - /* print the slab read */ - for (i=0; i < count[0]; i++){ - printf("Row %d: ", (int)(i*stride[0]+start[0])); - for (j=0; j < count[1]; j++){ - printf("%03d ", *dataptr++); - } - printf("\n"); - } -} - - -/* - * Print the content of the dataset. - */ -int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset, DATATYPE *original) -{ -#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */ - DATATYPE *dataptr = dataset; - DATATYPE *originptr = original; - - int i, j, nerrors; - - /* print it if verbose */ - if (verbose) - dataset_print(start, count, stride, dataset); - - nerrors = 0; - for (i=0; i < count[0]; i++){ - for (j=0; j < count[1]; j++){ - if (*dataset++ != *original++){ - nerrors++; - if (nerrors <= MAX_ERR_REPORT){ - printf("Dataset Verify failed at [%d][%d](row %d, col %d): expect %d, got %d\n", - i, j, - (int)(i*stride[0]+start[0]), (int)(j*stride[1]+start[1]), - *(dataset-1), *(original-1)); - } - } - } - } - if (nerrors > MAX_ERR_REPORT) - printf("[more errors ...]\n"); - if (nerrors) - printf("%d errors found in dataset_vrfy\n", nerrors); - return(nerrors); -} - - -/* - * Example of using the parallel HDF5 library to create two datasets - * in one HDF5 files with parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2. - * Each process controls only a slab of size DIM1 x DIM2 within each - * dataset. - */ - -void -phdf5writeInd(char *filename) -{ - hid_t fid1, fid2; /* HDF5 file IDs */ - hid_t acc_tpl1; /* File access templates */ - hid_t sid1,sid2; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - int rank = SPACE1_RANK; /* Logical rank of dataspace */ - hsize_t dims1[SPACE1_RANK] = - {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */ - hsize_t dimslocal1[SPACE1_RANK] = - {SPACE1_DIM1,SPACE1_DIM2}; /* local dataspace dim sizes */ - DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */ - - hsize_t start[SPACE1_RANK]; /* for hyperslab setting */ - hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int i, j; - int mpi_size, mpi_rank; - char *fname; - int mrc; /* mpi return code */ - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - if (verbose) - printf("Independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template with parallel IO access. 
*/ - acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS); - assert(acc_tpl1 != FAIL); - MESG("H5Pcreate access succeed"); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(acc_tpl1, comm, info); - assert(ret != FAIL); - MESG("H5Pset_fapl_mpio succeed"); - - /* create the file collectively */ - fid1=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl1); - assert(fid1 != FAIL); - MESG("H5Fcreate succeed"); - - /* Release file-access template */ - ret=H5Pclose(acc_tpl1); - assert(ret != FAIL); - - - /* -------------------------- - * Define the dimensions of the overall datasets - * and the slabs local to the MPI process. - * ------------------------- */ - /* setup dimensionality object */ - sid1 = H5Screate_simple (SPACE1_RANK, dims1, NULL); - assert (sid1 != FAIL); - MESG("H5Screate_simple succeed"); - - - /* create a dataset collectively */ - dataset1 = H5Dcreate(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, - H5P_DEFAULT); - assert(dataset1 != FAIL); - MESG("H5Dcreate succeed"); - - /* create another dataset collectively */ - dataset2 = H5Dcreate(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, - H5P_DEFAULT); - assert(dataset2 != FAIL); - MESG("H5Dcreate succeed"); - - - - /* set up dimensions of the slab this process accesses */ - start[0] = mpi_rank*SPACE1_DIM1/mpi_size; - start[1] = 0; - count[0] = SPACE1_DIM1/mpi_size; - count[1] = SPACE1_DIM2; - stride[0] = 1; - stride[1] =1; -if (verbose) - printf("start[]=(%d,%d), count[]=(%d,%d), total datapoints=%d\n", - start[0], start[1], count[0], count[1], count[0]*count[1]); - - /* put some trivial data in the data_array */ - dataset_fill(start, count, stride, &data_array1[0][0]); - MESG("data_array initialized"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - assert(file_dataspace != FAIL); - MESG("H5Dget_space succeed"); - ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, - count, NULL); - assert(ret != FAIL); - MESG("H5Sset_hyperslab succeed"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL); - assert (mem_dataspace != FAIL); - - /* write data independently */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - assert(ret != FAIL); - MESG("H5Dwrite succeed"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - assert(ret != FAIL); - MESG("H5Dwrite succeed"); - - /* release dataspace ID */ - H5Sclose(file_dataspace); - - /* close dataset collectively */ - ret=H5Dclose(dataset1); - assert(ret != FAIL); - MESG("H5Dclose1 succeed"); - ret=H5Dclose(dataset2); - assert(ret != FAIL); - MESG("H5Dclose2 succeed"); - - /* release all IDs created */ - H5Sclose(sid1); - - /* close the file collectively */ - H5Fclose(fid1); -} - -/* Example of using the parallel HDF5 library to read a dataset */ -void -phdf5readInd(char *filename) -{ - hid_t fid1, fid2; /* HDF5 file IDs */ - hid_t acc_tpl1; /* File access templates */ - hid_t sid1,sid2; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - int rank = SPACE1_RANK; /* Logical rank of dataspace */ - hsize_t dims1[] = {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */ - DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */ - DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */ - - hsize_t 
start[SPACE1_RANK]; /* for hyperslab setting */ - hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int i, j; - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - if (verbose) - printf("Independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - - - /* setup file access template */ - acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS); - assert(acc_tpl1 != FAIL); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(acc_tpl1, comm, info); - assert(ret != FAIL); - - - /* open the file collectively */ - fid1=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl1); - assert(fid1 != FAIL); - - /* Release file-access template */ - ret=H5Pclose(acc_tpl1); - assert(ret != FAIL); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen(fid1, DATASETNAME1); - assert(dataset1 != FAIL); - - /* open another dataset collectively */ - dataset2 = H5Dopen(fid1, DATASETNAME1); - assert(dataset2 != FAIL); - - - /* set up dimensions of the slab this process accesses */ - start[0] = mpi_rank*SPACE1_DIM1/mpi_size; - start[1] = 0; - count[0] = SPACE1_DIM1/mpi_size; - count[1] = SPACE1_DIM2; - stride[0] = 1; - stride[1] =1; -if (verbose) - printf("start[]=(%d,%d), count[]=(%d,%d), total datapoints=%d\n", - start[0], start[1], count[0], count[1], count[0]*count[1]); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - assert(file_dataspace != FAIL); - ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, - count, NULL); - assert(ret != FAIL); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL); - assert (mem_dataspace != FAIL); - - /* fill dataset with test data */ - dataset_fill(start, count, stride, &data_origin1[0][0]); - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - assert(ret != FAIL); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]); - assert(ret != FAIL); - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - assert(ret != FAIL); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]); - assert(ret == 0); - - /* close dataset collectively */ - ret=H5Dclose(dataset1); - assert(ret != FAIL); - ret=H5Dclose(dataset2); - assert(ret != FAIL); - - /* release all IDs created */ - H5Sclose(file_dataspace); - - /* close the file collectively */ - H5Fclose(fid1); -} - - -/* - * Example of using the parallel HDF5 library to create two datasets - * in one HDF5 file with collective parallel access support. - * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2. - * Each process controls only a slab of size DIM1 x DIM2 within each - * dataset. [Note: not so yet. Datasets are of sizes DIM1xDIM2 and - * each process controls a hyperslab within.] 
- */ - -void -phdf5writeAll(char *filename) -{ - hid_t fid1, fid2; /* HDF5 file IDs */ - hid_t acc_tpl1; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid1,sid2; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - int rank = SPACE1_RANK; /* Logical rank of dataspace */ - hsize_t dims1[SPACE1_RANK] = - {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */ - DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */ - - hsize_t start[SPACE1_RANK]; /* for hyperslab setting */ - hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - if (verbose) - printf("Collective write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template with parallel IO access. */ - acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS); - assert(acc_tpl1 != FAIL); - MESG("H5Pcreate access succeed"); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(acc_tpl1, comm, info); - assert(ret != FAIL); - MESG("H5Pset_fapl_mpio succeed"); - - /* create the file collectively */ - fid1=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl1); - assert(fid1 != FAIL); - MESG("H5Fcreate succeed"); - - /* Release file-access template */ - ret=H5Pclose(acc_tpl1); - assert(ret != FAIL); - - - /* -------------------------- - * Define the dimensions of the overall datasets - * and create the dataset - * ------------------------- */ - /* setup dimensionality object */ - sid1 = H5Screate_simple (SPACE1_RANK, dims1, NULL); - assert (sid1 != FAIL); - MESG("H5Screate_simple succeed"); - - - /* create a dataset collectively */ - dataset1 = H5Dcreate(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT); - assert(dataset1 != FAIL); - MESG("H5Dcreate succeed"); - - /* create another dataset collectively */ - dataset2 = H5Dcreate(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT); - assert(dataset2 != FAIL); - MESG("H5Dcreate 2 succeed"); - - /* - * Set up dimensions of the slab this process accesses. - */ - - /* Dataset1: each process takes a block of rows. 
*/ - slab_set(start, count, stride, BYROW); -if (verbose) - printf("start[]=(%d,%d), count[]=(%d,%d), total datapoints=%d\n", - start[0], start[1], count[0], count[1], count[0]*count[1]); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - assert(file_dataspace != FAIL); - MESG("H5Dget_space succeed"); - ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, - count, NULL); - assert(ret != FAIL); - MESG("H5Sset_hyperslab succeed"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL); - assert (mem_dataspace != FAIL); - - /* fill the local slab with some trivial data */ - dataset_fill(start, count, stride, &data_array1[0][0]); - MESG("data_array initialized"); - if (verbose){ - MESG("data_array created"); - dataset_print(start, count, stride, &data_array1[0][0]); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - assert(xfer_plist != FAIL); - ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - assert(ret != FAIL); - MESG("H5Pcreate xfer succeed"); - - /* write data collectively */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - assert(ret != FAIL); - MESG("H5Dwrite succeed"); - - /* release all temporary handles. */ - /* Could have used them for dataset2 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset2: each process takes a block of columns. */ - slab_set(start, count, stride, BYCOL); -if (verbose) - printf("start[]=(%d,%d), count[]=(%d,%d), total datapoints=%d\n", - start[0], start[1], count[0], count[1], count[0]*count[1]); - - /* put some trivial data in the data_array */ - dataset_fill(start, count, stride, &data_array1[0][0]); - MESG("data_array initialized"); - if (verbose){ - MESG("data_array created"); - dataset_print(start, count, stride, &data_array1[0][0]); - } - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - assert(file_dataspace != FAIL); - MESG("H5Dget_space succeed"); - ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, - count, NULL); - assert(ret != FAIL); - MESG("H5Sset_hyperslab succeed"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL); - assert (mem_dataspace != FAIL); - - /* fill the local slab with some trivial data */ - dataset_fill(start, count, stride, &data_array1[0][0]); - MESG("data_array initialized"); - if (verbose){ - MESG("data_array created"); - dataset_print(start, count, stride, &data_array1[0][0]); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - assert(xfer_plist != FAIL); - ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - assert(ret != FAIL); - MESG("H5Pcreate xfer succeed"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - assert(ret != FAIL); - MESG("H5Dwrite succeed"); - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - - /* - * All writes completed. 
Close datasets collectively - */ - ret=H5Dclose(dataset1); - assert(ret != FAIL); - MESG("H5Dclose1 succeed"); - ret=H5Dclose(dataset2); - assert(ret != FAIL); - MESG("H5Dclose2 succeed"); - - /* release all IDs created */ - H5Sclose(sid1); - - /* close the file collectively */ - H5Fclose(fid1); -} - -/* - * Example of using the parallel HDF5 library to read two datasets - * in one HDF5 file with collective parallel access support. - * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2. - * Each process controls only a slab of size DIM1 x DIM2 within each - * dataset. [Note: not so yet. Datasets are of sizes DIM1xDIM2 and - * each process controls a hyperslab within.] - */ - -void -phdf5readAll(char *filename) -{ - hid_t fid1, fid2; /* HDF5 file IDs */ - hid_t acc_tpl1; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid1,sid2; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - int rank = SPACE1_RANK; /* Logical rank of dataspace */ - hsize_t dims1[] = {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */ - DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */ - DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */ - - hsize_t start[SPACE1_RANK]; /* for hyperslab setting */ - hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - if (verbose) - printf("Collective read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template with parallel IO access. */ - acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS); - assert(acc_tpl1 != FAIL); - MESG("H5Pcreate access succeed"); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(acc_tpl1, comm, info); - assert(ret != FAIL); - MESG("H5Pset_fapl_mpio succeed"); - - /* open the file collectively */ - fid1=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl1); - assert(fid1 != FAIL); - MESG("H5Fopen succeed"); - - /* Release file-access template */ - ret=H5Pclose(acc_tpl1); - assert(ret != FAIL); - - - /* -------------------------- - * Open the datasets in it - * ------------------------- */ - /* open the dataset1 collectively */ - dataset1 = H5Dopen(fid1, DATASETNAME1); - assert(dataset1 != FAIL); - MESG("H5Dopen succeed"); - - /* open another dataset collectively */ - dataset2 = H5Dopen(fid1, DATASETNAME1); - assert(dataset2 != FAIL); - MESG("H5Dopen 2 succeed"); - - /* - * Set up dimensions of the slab this process accesses. - */ - - /* Dataset1: each process takes a block of columns. 
*/ - slab_set(start, count, stride, BYCOL); -if (verbose) - printf("start[]=(%d,%d), count[]=(%d,%d), total datapoints=%d\n", - start[0], start[1], count[0], count[1], count[0]*count[1]); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - assert(file_dataspace != FAIL); - MESG("H5Dget_space succeed"); - ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, - count, NULL); - assert(ret != FAIL); - MESG("H5Sset_hyperslab succeed"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL); - assert (mem_dataspace != FAIL); - - /* fill dataset with test data */ - dataset_fill(start, count, stride, &data_origin1[0][0]); - MESG("data_array initialized"); - if (verbose){ - MESG("data_array created"); - dataset_print(start, count, stride, &data_array1[0][0]); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - assert(xfer_plist != FAIL); - ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - assert(ret != FAIL); - MESG("H5Pcreate xfer succeed"); - - /* read data collectively */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - assert(ret != FAIL); - MESG("H5Dread succeed"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]); - assert(ret != FAIL); - - /* release all temporary handles. */ - /* Could have used them for dataset2 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset2: each process takes a block of rows. */ - slab_set(start, count, stride, BYROW); -if (verbose) - printf("start[]=(%d,%d), count[]=(%d,%d), total datapoints=%d\n", - start[0], start[1], count[0], count[1], count[0]*count[1]); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - assert(file_dataspace != FAIL); - MESG("H5Dget_space succeed"); - ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, - count, NULL); - assert(ret != FAIL); - MESG("H5Sset_hyperslab succeed"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL); - assert (mem_dataspace != FAIL); - - /* fill dataset with test data */ - dataset_fill(start, count, stride, &data_origin1[0][0]); - MESG("data_array initialized"); - if (verbose){ - MESG("data_array created"); - dataset_print(start, count, stride, &data_array1[0][0]); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - assert(xfer_plist != FAIL); - ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - assert(ret != FAIL); - MESG("H5Pcreate xfer succeed"); - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - assert(ret != FAIL); - MESG("H5Dread succeed"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]); - assert(ret != FAIL); - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - - /* - * All reads completed. 
Close datasets collectively - */ - ret=H5Dclose(dataset1); - assert(ret != FAIL); - MESG("H5Dclose1 succeed"); - ret=H5Dclose(dataset2); - assert(ret != FAIL); - MESG("H5Dclose2 succeed"); - - /* close the file collectively */ - H5Fclose(fid1); -} - -/* - * test file access by communicator besides COMM_WORLD. - * Split COMM_WORLD into two, one (even_comm) contains the original - * processes of even ranks. The other (odd_comm) contains the original - * processes of odd ranks. Processes in even_comm creates a file, then - * cloose it, using even_comm. Processes in old_comm just do a barrier - * using odd_comm. Then they all do a barrier using COMM_WORLD. - * If the file creation and cloose does not do correct collective action - * according to the communicator argument, the processes will freeze up - * sooner or later due to barrier mixed up. - */ -void -test_split_comm_access(char *filenames[]) -{ - int mpi_size, myrank; - MPI_Comm comm; - MPI_Info info = MPI_INFO_NULL; - int color, mrc; - int newrank, newprocs; - hid_t fid; /* file IDs */ - hid_t acc_tpl; /* File access properties */ - herr_t ret; /* generic return value */ - - if (verbose) - printf("Independent write test on file %s %s\n", - filenames[0], filenames[1]); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&myrank); - color = myrank%2; - mrc = MPI_Comm_split (MPI_COMM_WORLD, color, myrank, &comm); - assert(mrc==MPI_SUCCESS); - MPI_Comm_size(comm,&newprocs); - MPI_Comm_rank(comm,&newrank); - - if (color){ - /* odd-rank processes */ - mrc = MPI_Barrier(comm); - assert(mrc==MPI_SUCCESS); - }else{ - /* even-rank processes */ - /* setup file access template */ - acc_tpl = H5Pcreate (H5P_FILE_ACCESS); - assert(acc_tpl != FAIL); - - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(acc_tpl, comm, info); - assert(ret != FAIL); - - /* create the file collectively */ - fid=H5Fcreate(filenames[color],H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl); - assert(fid != FAIL); - MESG("H5Fcreate succeed"); - - /* Release file-access template */ - ret=H5Pclose(acc_tpl); - assert(ret != FAIL); - - ret=H5Fclose(fid); - assert(ret != FAIL); - } - if (myrank == 0){ - mrc = MPI_File_delete(filenames[color], info); - assert(mrc==MPI_SUCCESS); - } -} - -/* - * Show command usage - */ -void -usage() -{ - printf("Usage: testphdf5 [-r] [-w] [-v]\n"); - printf("\t-r\tno read\n"); - printf("\t-w\tno write\n"); - printf("\t-v\tverbose on\n"); - printf("\tdefault do write then read\n"); - printf("\n"); -} - - -/* - * parse the command line options - */ -int -parse_options(int argc, char **argv){ - while (--argc){ - if (**(++argv) != '-'){ - break; - }else{ - switch(*(*argv+1)){ - case 'r': doread = 0; - break; - case 'w': dowrite = 0; - break; - case 'v': verbose = 1; - break; - default: usage(); - nerrors++; - return(1); - } - } - } - return(0); -} - - -int -main(int argc, char **argv) -{ - char *filenames[]={ "ParaEg1.h5f", "ParaEg2.h5f" }; - - int mpi_namelen; - char mpi_name[MPI_MAX_PROCESSOR_NAME]; - - MPI_Init(&argc,&argv); - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - MPI_Get_processor_name(mpi_name,&mpi_namelen); - /* Make sure datasets can be divided into equal chunks by the processes */ - if ((SPACE1_DIM1 % mpi_size) || (SPACE1_DIM2 % mpi_size)){ - printf("DIM1(%d) and DIM2(%d) must be multiples of processes (%d)\n", - SPACE1_DIM1, SPACE1_DIM2, mpi_size); - nerrors++; - goto finish; - } - - if (parse_options(argc, argv) != 0) - goto finish; - - 
if (dowrite){ - MPI_BANNER("testing PHDF5 dataset using split communicators..."); - test_split_comm_access(filenames); - MPI_BANNER("testing PHDF5 dataset independent write..."); - phdf5writeInd(filenames[0]); - MPI_BANNER("testing PHDF5 dataset collective write..."); - phdf5writeAll(filenames[1]); - } - if (doread){ - MPI_BANNER("testing PHDF5 dataset independent read..."); - phdf5readInd(filenames[0]); - MPI_BANNER("testing PHDF5 dataset collective read..."); - phdf5readAll(filenames[1]); - } - - if (!(dowrite || doread)){ - usage(); - nerrors++; - } - -finish: - if (mpi_rank == 0){ /* only process 0 reports */ - if (nerrors) - printf("***PHDF5 tests detected %d errors***\n", nerrors); - else{ - printf("===================================\n"); - printf("PHDF5 tests finished with no errors\n"); - printf("===================================\n"); - } - } - MPI_Finalize(); - - return(nerrors); -} - -#else /* H5_HAVE_PARALLEL */ -/* dummy program since H5_HAVE_PARALLE is not configured in */ -int -main() -{ -printf("No PHDF5 example because parallel is not configured in\n"); -return(0); -} -#endif /* H5_HAVE_PARALLEL */ diff --git a/doc/html/ph5implement.txt b/doc/html/ph5implement.txt deleted file mode 100644 index 2fcbb3d..0000000 --- a/doc/html/ph5implement.txt +++ /dev/null @@ -1,27 +0,0 @@ -Release information for parallel HDF5 -------------------------------------- - -+) Current release supports independent access to fixed dimension datasets - only. - -+) The comm and info arguments of H5Pset_mpi are not used. All parallel - I/O are done via MPI_COMM_WORLD. Access_mode for H5Pset_mpi can be - H5ACC_INDEPENDENT only. - -+) This release of parallel HDF5 has been tested on IBM SP2 and SGI - Origin 2000 systems. It uses the ROMIO version of MPIO interface - for parallel I/O supports. - -+) Useful URL's. - Parallel HDF webpage: "http://hdf.ncsa.uiuc.edu/Parallel_HDF/" - ROMIO webpage: "http://www.mcs.anl.gov/home/thakur/romio/" - -+) Some to-do items for future releases - support for Intel Teraflop platform. - support for unlimited dimension datasets. - support for file access via a communicator besides MPI_COMM_WORLD. - support for collective access to datasets. - support for independent create/open of datasets. - ----- -Last updated: Feb 16, 1998. diff --git a/doc/html/pipe1.gif b/doc/html/pipe1.gif deleted file mode 100644 index 3b489a6..0000000 Binary files a/doc/html/pipe1.gif and /dev/null differ diff --git a/doc/html/pipe1.obj b/doc/html/pipe1.obj deleted file mode 100644 index 41f3461..0000000 --- a/doc/html/pipe1.obj +++ /dev/null @@ -1,136 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,0,0,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 160,160,144,224,160,272,176,224,160,160],1,2,1,25,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 848,160,832,224,848,272,864,224,848,160],1,2,1,34,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -box('black',464,192,496,256,26,1,1,39,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 160,224,464,224],1,2,1,40,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 496,224,848,224],1,2,1,41,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). 
-poly('black',5,[ - 192,224,176,288,192,336,208,288,192,224],1,2,1,42,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 432,224,416,288,432,336,448,288,432,224],1,2,1,43,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 192,288,432,288],1,2,1,44,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',464,352,496,416,26,1,1,45,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 528,224,512,288,528,336,544,288,528,224],1,2,1,46,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 816,224,800,288,816,336,832,288,816,224],1,2,1,47,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 528,288,816,288],1,2,1,48,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 464,256,456,304,464,328,488,304,488,256],1,2,1,62,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 480,352,488,304],2,2,1,85,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ -]). -box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application"]). -text('black',480,144,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5D_read()"]). -text('black',480,128,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Dread()"]). -text('black',304,208,'Helvetica',0,17,1,1,0,1,86,15,115,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',304,192,'Helvetica',0,17,1,1,0,1,99,15,119,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_fgath()"]). -text('black',296,288,'Helvetica',0,17,1,1,0,1,101,15,125,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',296,304,'Helvetica',0,17,1,1,0,1,90,15,132,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',296,320,'Helvetica',0,17,1,1,0,1,98,15,136,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',296,336,'Helvetica',0,17,1,1,0,1,33,15,140,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',664,208,'Helvetica',0,17,1,1,0,1,106,15,146,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). -text('black',664,176,'Helvetica',0,17,1,1,0,1,104,15,150,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_mscat()"]). -text('black',664,272,'Helvetica',0,17,1,1,0,1,54,15,154,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',384,392,'Helvetica',0,17,1,1,0,1,105,15,170,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5T_conv_struct()"]). -poly('black',4,[ - 392,384,400,352,440,368,456,336],1,1,1,172,1,0,0,0,8,3,0,0,0,'1','8','3', - "6",[ -]). -text('black',480,176,'Helvetica',0,17,1,1,0,1,44,15,176,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "TCONV"]). -text('black',480,416,'Helvetica',0,17,1,1,0,1,25,15,182,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "BKG"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,380,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 1: Internal Contiguous Storage"]). 
-text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',160,208,'Helvetica',0,17,1,1,0,1,8,15,207,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). -text('black',192,272,'Helvetica',0,17,1,1,0,1,9,15,211,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "C"]). -text('black',504,208,'Helvetica',0,17,1,1,0,1,8,15,215,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "E"]). -text('black',528,272,'Helvetica',0,17,1,1,0,1,8,15,223,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "F"]). -text('black',464,304,'Helvetica',0,17,1,1,0,1,9,15,231,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "D"]). -text('black',664,192,'Helvetica',0,17,1,1,0,1,107,15,324,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). diff --git a/doc/html/pipe2.gif b/doc/html/pipe2.gif deleted file mode 100644 index 3a0c947..0000000 Binary files a/doc/html/pipe2.gif and /dev/null differ diff --git a/doc/html/pipe2.obj b/doc/html/pipe2.obj deleted file mode 100644 index 70d9c18..0000000 --- a/doc/html/pipe2.obj +++ /dev/null @@ -1,168 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,1,1,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 160,160,144,224,160,272,176,224,160,160],1,2,1,25,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 848,160,832,224,848,272,864,224,848,160],1,2,1,34,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -box('black',464,192,496,256,26,1,1,39,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 160,224,464,224],1,2,1,40,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 496,224,848,224],1,2,1,41,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 192,224,176,288,192,336,208,288,192,224],1,2,1,42,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 432,224,416,288,432,336,448,288,432,224],1,2,1,43,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 192,288,432,288],1,2,1,44,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',464,352,496,416,26,1,1,45,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 528,224,512,288,528,336,544,288,528,224],1,2,1,46,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 816,224,800,288,816,336,832,288,816,224],1,2,1,47,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 528,288,816,288],1,2,1,48,0,26,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 848,240,848,352,832,384,800,384,496,384],1,2,1,55,1,0,0,0,10,4,0,0,0,'2','10','4', - "70",[ -]). -poly('black',5,[ - 528,384,512,448,528,496,544,448,528,384],1,2,1,57,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 800,384,784,448,800,496,816,448,800,384],1,2,1,58,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 800,448,528,448],1,2,1,61,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',5,[ - 464,256,456,304,464,328,488,304,488,256],1,2,1,62,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 480,352,488,304],0,2,1,85,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ -]). -box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). 
-text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application"]). -text('black',480,144,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5D_read()"]). -text('black',480,128,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Dread()"]). -text('black',304,208,'Helvetica',0,17,1,1,0,1,86,15,115,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',304,192,'Helvetica',0,17,1,1,0,1,99,15,119,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_fgath()"]). -text('black',296,288,'Helvetica',0,17,1,1,0,1,101,15,125,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',296,304,'Helvetica',0,17,1,1,0,1,90,15,132,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',296,320,'Helvetica',0,17,1,1,0,1,98,15,136,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',296,336,'Helvetica',0,17,1,1,0,1,33,15,140,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',664,208,'Helvetica',0,17,1,1,0,1,106,15,146,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). -text('black',664,176,'Helvetica',0,17,1,1,0,1,104,15,150,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_mscat()"]). -text('black',664,272,'Helvetica',0,17,1,1,0,1,54,15,154,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',672,368,'Helvetica',0,17,1,1,0,1,106,15,158,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). -text('black',672,336,'Helvetica',0,17,1,1,0,1,105,15,162,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_mgath()"]). -text('black',672,432,'Helvetica',0,17,1,1,0,1,54,15,166,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',384,392,'Helvetica',0,17,1,1,0,1,105,15,170,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5T_conv_struct()"]). -poly('black',4,[ - 392,384,400,352,440,368,456,336],1,1,1,172,1,0,0,0,8,3,0,0,0,'1','8','3', - "6",[ -]). -text('black',480,176,'Helvetica',0,17,1,1,0,1,44,15,176,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "TCONV"]). -text('black',480,416,'Helvetica',0,17,1,1,0,1,25,15,182,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "BKG"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,404,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 2: Partially Initialized Destination"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',160,208,'Helvetica',0,17,1,1,0,1,8,15,207,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). -text('black',192,272,'Helvetica',0,17,1,1,0,1,9,15,211,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "C"]). -text('black',504,208,'Helvetica',0,17,1,1,0,1,8,15,215,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "E"]). -text('black',528,272,'Helvetica',0,17,1,1,0,1,8,15,223,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "F"]). -text('black',856,288,'Helvetica',0,17,1,1,0,1,9,15,225,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "G"]). -text('black',800,432,'Helvetica',0,17,1,1,0,1,9,15,229,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H"]). -text('black',464,304,'Helvetica',0,17,1,1,0,1,9,15,231,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "D"]). 
-poly('black',4,[ - 848,240,848,224,864,224,904,224],0,2,1,318,1,0,0,0,10,4,0,0,0,'2','10','4', - "6",[ -]). -text('black',664,192,'Helvetica',0,17,1,1,0,1,107,15,326,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). -text('black',672,352,'Helvetica',0,17,1,1,0,1,107,15,334,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). diff --git a/doc/html/pipe3.gif b/doc/html/pipe3.gif deleted file mode 100644 index 26d82ad..0000000 Binary files a/doc/html/pipe3.gif and /dev/null differ diff --git a/doc/html/pipe3.obj b/doc/html/pipe3.obj deleted file mode 100644 index cdfef7c..0000000 --- a/doc/html/pipe3.obj +++ /dev/null @@ -1,70 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,0,0,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ -]). -box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application"]). -text('black',480,104,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5D_read()"]). -text('black',480,88,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Dread()"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,295,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 3: No Type Conversion"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -poly('black',5,[ - 152,160,136,224,152,272,168,224,152,160],1,2,1,273,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',480,120,'Helvetica',0,17,1,1,0,1,96,15,277,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5S_simp_read()"]). -text('black',480,136,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -poly('black',5,[ - 880,160,864,224,880,272,896,224,880,160],1,2,1,283,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',2,[ - 152,224,880,224],1,2,1,286,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -text('black',480,232,'Helvetica',0,17,1,1,0,1,101,15,291,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',480,248,'Helvetica',0,17,1,1,0,1,90,15,293,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',480,264,'Helvetica',0,17,1,1,0,1,98,15,309,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',480,280,'Helvetica',0,17,1,1,0,1,33,15,311,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',176,208,'Helvetica',0,17,1,1,0,1,8,15,418,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). 
diff --git a/doc/html/pipe4.gif b/doc/html/pipe4.gif deleted file mode 100644 index a3a857b..0000000 Binary files a/doc/html/pipe4.gif and /dev/null differ diff --git a/doc/html/pipe4.obj b/doc/html/pipe4.obj deleted file mode 100644 index 6f50123..0000000 --- a/doc/html/pipe4.obj +++ /dev/null @@ -1,92 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,1,2,1,0,1,0,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,96,944,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',928,72,'Helvetica',0,17,1,1,0,1,32,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Buffer"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -poly('black',5,[ - 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "== Loop"]). -poly('black',3,[ - 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,372,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 4: Regularly Chunked Storage"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',480,104,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',480,120,'Helvetica',0,17,1,1,0,1,102,15,349,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_read()"]). -text('black',480,136,'Helvetica',0,17,1,1,0,1,167,15,351,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_copy_hyperslab()"]). -poly('black',5,[ - 160,160,144,224,160,272,176,224,160,160],1,2,1,362,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -poly('black',5,[ - 880,160,864,224,880,272,896,224,880,160],1,2,1,363,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -box('black',448,192,512,256,26,1,1,364,0,0,0,0,0,'1',[ -]). -text('black',480,176,'Helvetica',0,17,1,1,0,1,43,15,367,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "CHUNK"]). -poly('black',2,[ - 160,224,448,224],1,2,1,372,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -poly('black',2,[ - 512,224,880,224],1,2,1,373,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -text('black',288,224,'Helvetica',0,17,1,1,0,1,101,15,385,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',288,240,'Helvetica',0,17,1,1,0,1,90,15,387,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',288,256,'Helvetica',0,17,1,1,0,1,98,15,391,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',288,272,'Helvetica',0,17,1,1,0,1,33,15,395,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -poly('black',5,[ - 456,256,448,296,480,320,512,296,504,256],1,2,1,401,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',184,208,'Helvetica',0,17,1,1,0,1,8,15,422,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). -text('black',520,208,'Helvetica',0,17,1,1,0,1,9,15,434,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "D"]). -text('black',440,272,'Helvetica',0,17,1,1,0,1,9,15,440,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "C"]). 
-text('black',480,320,'Helvetica',0,17,1,1,0,1,107,15,444,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Z_uncompress()"]). -text('black',672,224,'Helvetica',0,17,1,1,0,1,107,15,454,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_hyper_copy()"]). -text('black',672,240,'Helvetica',0,17,1,1,0,1,106,15,464,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5V_stride_copy()"]). -text('black',672,256,'Helvetica',0,17,1,1,0,1,54,15,466,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "memcpy()"]). -text('black',168,488,'Helvetica',0,17,1,0,0,1,282,15,471,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "NOTE: H5Z_uncompress() is not implemented yet."]). diff --git a/doc/html/pipe5.gif b/doc/html/pipe5.gif deleted file mode 100644 index 6ae0098..0000000 Binary files a/doc/html/pipe5.gif and /dev/null differ diff --git a/doc/html/pipe5.obj b/doc/html/pipe5.obj deleted file mode 100644 index 4738bbd..0000000 --- a/doc/html/pipe5.obj +++ /dev/null @@ -1,52 +0,0 @@ -%TGIF 3.0-p5 -state(1,33,100,0,0,0,8,1,9,1,1,1,2,1,0,1,0,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ -]). -box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ -]). -poly('black',2,[ - 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', - "0",[ -]). -box('black',912,96,944,224,26,1,1,88,0,0,0,0,0,'1',[ -]). -text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File"]). -text('black',928,72,'Helvetica',0,17,1,1,0,1,32,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Buffer"]). -box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ -]). -text('black',480,40,'Helvetica',0,24,1,1,0,1,333,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Fig 5: Reading a Single Chunk"]). -text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "A"]). -text('black',480,112,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_arr_read()"]). -text('black',480,128,'Helvetica',0,17,1,1,0,1,102,15,349,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_read()"]). -text('black',480,144,'Helvetica',0,17,1,1,0,1,167,15,351,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_istore_copy_hyperslab()"]). -text('black',480,160,'Helvetica',0,17,1,1,0,1,101,15,385,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_block_read()"]). -text('black',480,176,'Helvetica',0,17,1,1,0,1,90,15,387,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_low_read()"]). -text('black',480,192,'Helvetica',0,17,1,1,0,1,98,15,391,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5F_sec2_read()"]). -text('black',480,208,'Helvetica',0,17,1,1,0,1,33,15,395,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "read()"]). -text('black',864,240,'Helvetica',0,17,1,1,0,1,107,15,444,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Z_uncompress()"]). -text('black',56,488,'Helvetica',0,17,1,0,0,1,282,15,471,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "NOTE: H5Z_uncompress() is not implemented yet."]). -poly('black',5,[ - 912,176,864,176,840,208,872,232,912,216],1,2,1,490,2,0,0,0,10,4,0,0,0,'2','10','4', - "",[ -]). -text('black',896,184,'Helvetica',0,17,1,0,0,1,8,15,491,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "B"]). diff --git a/doc/html/review1.html b/doc/html/review1.html deleted file mode 100644 index 50d55cd..0000000 --- a/doc/html/review1.html +++ /dev/null @@ -1,283 +0,0 @@ - -HDF5 Draft Revised API Example Code - - -
      -

      HDF5: Revised API Example Code

      -
      - -

      Example programs/sections of code below: -

      -
      #1 -
      A simple example showing how to create a file. -
      #2 -
An example showing how to check if a file is an HDF5 file and list its contents. -
      #3 -
      An example showing how to create a homogeneous multi-dimensional dataset. -
      #4 -
      An example showing how to create a compound 1-D dataset. -
      #5 -
      An example showing how to create a compound multi-dimensional dataset. -
      #6 -
      An example showing how to read a generic dataset. -
      - -
      -

      Simple Example showing how to create a file.

      - -

      Notes:
      -This example creates a new HDF5 file and allows write access. -If the file exists already, the H5F_ACC_TRUNC flag would also be necessary to -overwrite the previous file's information. - -

      Code: - -

      -    hid_t file_id;
      -
      -    file_id=H5Fcreate("example1.h5",0);
      -
      -    H5Fclose(file_id);
      -
      -
      - -
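For comparison with the draft call above, a minimal sketch of the same operation using the HDF5 C API as it was eventually released, where H5Fcreate takes access flags and two property-list arguments; the file name is only illustrative:

    #include <hdf5.h>

    int main(void)
    {
        /* Create (or truncate) the file with default creation/access properties */
        hid_t file_id = H5Fcreate("example1.h5", H5F_ACC_TRUNC,
                                  H5P_DEFAULT, H5P_DEFAULT);
        if (file_id < 0)
            return 1;

        H5Fclose(file_id);
        return 0;
    }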
      -

Example showing how to check if a file is an HDF5 file and list its contents.

      - -

      Notes:
      -This example checks if a file is an HDF5 file and lists the contents of the top -level (file level) group. - -

      Code: - -

      -    hid_t file_id;      /* File ID */
      -    uint32 num_items;   /* number of items in top-level group */
      -    intn i;             /* counter */
      -    char *obj_name;     /* object's name as string atom */
      -    uintn name_len;     /* object name's length in chars */
      -    uintn buf_len=0;    /* buffer length for names */
      -    char *buf=NULL;     /* buffer for names */
      -
      -    if(H5Fis_hdf5("example2.h5")==TRUE)
      -      {
      -        file_id=H5Fopen("example2.h5",H5F_ACC_RDWR|H5ACC_CREATE);
      -        num_items=H5GgetNumContents(file_id);
      -        for(i=0; i<num_items; i++)
      -          {
      -            obj_name=H5GgetNameByIndex(file_id,i,NULL,0);
      -            printf("object #%d is: %s\n",i,obj_name);
      -            HDfree(obj_name);
      -          } 
      -        H5Fclose(file_id);
      -      }
      -
      -
      - -
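The listing calls above (H5GgetNumContents, H5GgetNameByIndex) are draft API and never shipped. A rough sketch of the same task with the released HDF5 1.8+ C API, assuming the same file name:

    #include <hdf5.h>
    #include <stdio.h>

    int main(void)
    {
        char       name[256];
        H5G_info_t ginfo;
        hsize_t    i;
        hid_t      file_id;

        if (H5Fis_hdf5("example2.h5") <= 0)          /* not an HDF5 file, or error */
            return 1;

        file_id = H5Fopen("example2.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
        if (file_id < 0)
            return 1;

        H5Gget_info(file_id, &ginfo);                /* info about the root group */

        for (i = 0; i < ginfo.nlinks; i++) {
            H5Lget_name_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_INC,
                               i, name, sizeof(name), H5P_DEFAULT);
            printf("object #%lu is: %s\n", (unsigned long)i, name);
        }

        H5Fclose(file_id);
        return 0;
    }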
      -

Example showing how to create a homogeneous multi-dimensional dataset.

      - -

      Notes:
      -This example creates a 4-dimensional dataset of 32-bit floating-point -numbers, corresponding to the current Scientific Dataset functionality. -This example assumes that the datatype and dataspace of the dataset will not -be re-used. - -

      Code: - -

      -    hid_t file_id;                  /* File's ID */
      -    uint32 dims[4]={6,5,4,3};       /* the size of each dimension */
      -    hid_t dataset_id;               /* new object's ID */
      -    float32 obj_data[6][5][4][3];   /* storage for the dataset's data */
      -
      -    if((file_id=H5Fcreate("example3.h5",H5F_ACC_TRUNC))>=0)
      -      {
      -        /* Create & initialize the dataset object */
      -        dataset_id=H5Mcreate(file_id,H5OBJ_DATASET,"Simple Object");
      -
      -        /* Create & initialize a datatype object */
      -        H5TsetType(dataset_id,H5TYPE_FLOAT,4,H5T_BIGENDIAN);
      -
      -        /* Initialize dimensionality of dataset */
      -        H5SsetSpace(dataset_id,4,dims);
      -
      -        <initialize data array>
      -
      -        /* Write the entire dataset out */
      -        H5Dwrite(dataset_id,H5S_SCALAR,data);
      -        <or>
      -        H5Dwrite(dataset_id,dataset_id,data);
      -
      -        /* Release the atoms we've created */
      -        H5Mrelease(dataset_id);
      -
      -        /* close the file */
      -        H5Fclose(file_id);
      -      }
      -
      - -
      -

Example showing how to create a compound 1-D dataset.

      - -

      Notes:
      -This example creates a 1-dimensional dataset of compound datatype records, -corresponding to the current Vdata functionality. This example also assumes -that the datatype and dataspace will not be re-used. - -

      Code: - -

      -    hid_t file_id;              /* File's ID */
      -    uint32 dims[1]={45};        /* the size of the dimension */
      -    hid_t dataset_id;           /* object's ID */
      -    void *obj_data;             /* pointer to the dataset's data */
      -
      -    if((file_id=H5Fcreate("example4.h5",H5F_ACC_TRUNC))>=0)
      -      {
      -        /* Create & initialize the dataset object */
      -        dataset_id=H5Mcreate(file_id,H5OBJ_DATASET,"Compound Object");
      -
      -        /* Initialize datatype */
      -        H5TsetType(dataset_id,H5TYPE_STRUCT);
      -        H5TaddField(dataset_id,H5TYPE_FLOAT32,"Float32 Scalar Field",H5SPACE_SCALAR);
      -        H5TaddField(dataset_id,H5TYPE_CHAR,"Char Field",H5SPACE_SCALAR);
      -        H5TaddField(dataset_id,H5TYPE_UINT16,"UInt16 Field",H5SPACE_SCALAR);
      -        H5TendDefine(dataset_id);
      -
      -        /* Initialize dimensionality */
      -        H5SsetSpace(dataset_id,1,dims);
      -
      -        <initialize data array>
      -
      -        /* Write the entire dataset out */
      -        H5Dwrite(dataset_id,H5S_SCALAR,data);
      -
      -        /* Release the atoms we've created */
      -        H5Mrelease(dataset_id);
      -
      -        /* close the file */
      -        H5Fclose(file_id);
      -      }
      -
      - -
      -

Example showing how to create a compound multi-dimensional dataset.

      - -

      Notes:
      -This example creates a 3-dimensional dataset of compound datatype records, -roughly corresponding to a multi-dimensional Vdata functionality. This -example also shows the use of multi-dimensional fields in the compound datatype. -This example uses "stand-alone" datatypes and dataspaces. - -

      Code: - -

      -    hid_t file_id;              /* File's ID */
      -    hid_t type_id;              /* datatype's ID */
      -    hid_t dim_id;               /* dimensionality's ID */
      -    uint32 dims[3]={95,67,5};   /* the size of the dimensions */
      -    hid_t field_dim_id;         /* dimensionality ID for fields in the structure */
      -    uint32 field_dims[4];       /* array for field dimensions */
      -    hid_t dataset_id;           /* object's ID */
      -    void *obj_data;             /* pointer to the dataset's data */
      -
      -    if((file_id=H5Fcreate("example5.h5",H5F_ACC_TRUNC))>=0)
      -      {
      -        /* Create & initialize a datatype object */
      -        type_id=H5Mcreate(file_id,H5OBJ_DATATYPE,"Compound Type #1");
      -        H5TsetType(type_id,H5TYPE_STRUCT);
      -
      -        /* Create each multi-dimensional field in structure */
      -        field_dim_id=H5Mcreate(file_id,H5OBJ_DATASPACE,"Lat/Long Dims");
      -        field_dims[0]=360;
      -        field_dims[1]=720;
      -        H5SsetSpace(field_dim_id,2,field_dims);
      -        H5TaddField(type_id,H5TYPE_FLOAT32,"Lat/Long Locations",field_dim_id);
      -        H5Mrelease(field_dim_id);
      -
      -        field_dim_id=H5Mcreate(file_id,H5OBJ_DATASPACE,"Browse Dims");
      -        field_dims[0]=40;
      -        field_dims[1]=40;
      -        H5SsetSpace(field_dim_id,2,field_dims);
      -        H5TaddField(type_id,H5TYPE_CHAR,"Browse Image",field_dim_id);
      -        H5Mrelease(field_dim_id);
      -
      -        field_dim_id=H5Mcreate(file_id,H5OBJ_DATASPACE,"Multispectral Dims");
      -        field_dims[0]=80;
      -        field_dims[1]=60;
      -        field_dims[2]=40;
      -        H5SsetSpace(field_dim_id,3,field_dims);
      -        H5TaddField(type_id,H5TYPE_UINT16,"Multispectral Scans",field_dim_id);
      -        H5Mrelease(field_dim_id);
      -        H5TendDefine(type_id);
      -
      -        /* Create & initialize a dimensionality object */
      -        dim_id=H5Mcreate(file_id,H5OBJ_DATASPACE,"3-D Dim");
      -        H5SsetSpace(dim_id,3,dims);
      -
      -        /* Create & initialize the dataset object */
      -        dataset_id=H5Mcreate(file_id,H5OBJ_DATASET,"Compound Multi-Dim Object");
      -        H5DsetInfo(dataset_id,type_id,dim_id);
      -
      -        <initialize data array>
      -
      -        /* Write the entire dataset out */
      -        H5Dwrite(dataset_id,H5S_SCALAR,data);
      -
      -        /* Release the atoms we've created */
      -        H5Mrelease(type_id);
      -        H5Mrelease(dim_id);
      -        H5Mrelease(dataset_id);
      -
      -        /* close the file */
      -        H5Fclose(file_id);
      -      }
      -
      - -
      -

Example showing how to read a generic dataset.

      - -

      Notes:
      -This example shows how to get the information for and display a generic -dataset. - -

      Code: - -

      -    hid_t file_id;      /* File's ID */
      -    hid_t dataset_id;   /* dataset's ID in memory */
      -    uintn elem_size;    /* size of each element */
      -    uintn nelems;       /* number of elements in array */
      -    void *obj_data;     /* pointer to the dataset's data */
      -
      -    if((file_id=H5Fopen("example6.h5",0))>=0)
      -      {
      -        /* Attach to the dataset object */
      -        dataset_id=H5MaccessByIndex(file_id,0);
      -
      -        if(H5TbaseType(dataset_id)==H5T_COMPOUND)
      -          {
      -            <set up for compound object>
      -          } 
      -        else
      -          {
      -            <set up for homogenous object>
      -          } 
      -
      -        elem_size=H5Tsize(dataset_id);
      -        nelems=H5Snelem(dataset_id);
      -        <allocate space based on element size and number of elements >
      -
      -        /* Read in the dataset */
      -        H5Dread(dataset_id,H5S_SCALAR,data);
      -            <or>
      -        H5Dread(dataset_id,dataset_id,data);
      -
      -        /* Release the atoms we've accessed */
      -        H5Mrelease(dataset_id);
      -
      -        /* close the file */
      -        H5Fclose(file_id);
      -      }
      -
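The draft calls above (H5MaccessByIndex, H5TbaseType, H5Tsize, H5Snelem) were never released. A hedged sketch of reading a generic dataset with the released C API; the dataset name "dset" is purely illustrative:

    #include <hdf5.h>
    #include <stdlib.h>

    int main(void)
    {
        hid_t    file_id, dataset_id, type_id, space_id;
        size_t   elem_size;
        hssize_t nelems;
        void    *obj_data;

        file_id = H5Fopen("example6.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
        if (file_id < 0)
            return 1;

        dataset_id = H5Dopen2(file_id, "dset", H5P_DEFAULT);  /* "dset" is illustrative */

        type_id   = H5Dget_type(dataset_id);      /* datatype as stored in the file */
        space_id  = H5Dget_space(dataset_id);     /* dataspace of the dataset       */
        elem_size = H5Tget_size(type_id);
        nelems    = H5Sget_simple_extent_npoints(space_id);

        obj_data = malloc((size_t)nelems * elem_size);

        /* Read the whole dataset; a fuller version would pick the memory type
         * with H5Tget_native_type() instead of reusing the file type. */
        H5Dread(dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, obj_data);

        free(obj_data);
        H5Sclose(space_id);
        H5Tclose(type_id);
        H5Dclose(dataset_id);
        H5Fclose(file_id);
        return 0;
    }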
      diff --git a/doc/html/review1a.html b/doc/html/review1a.html deleted file mode 100644 index 3df8af7..0000000 --- a/doc/html/review1a.html +++ /dev/null @@ -1,252 +0,0 @@ - - - - Group Examples - - -

      Group Examples

      - -
      -

      Background

      - -

Directories (or now Groups) are currently implemented as a directed graph with a single entry point into the graph, which is the Root Object. The root object is usually a group. All objects have at least one predecessor (the Root Object always has the HDF5 file super block as a predecessor). The number of predecessors of a group is also known as the hard link count or just link count. Unlike Unix directories, HDF5 groups have no ".." entry since any group can have multiple predecessors. Given the handle or id of some object, returning a full name for that object would be an expensive graph traversal.

A special optimization is that a file may contain a single non-group object and no group(s). The object has one predecessor, which is the file super block. However, once a root group is created it never disappears (although I suppose it could if we wanted).

      A special object called a Symbolic Link is simply a - name. Usually the name refers to some (other) object, but that - object need not exist. Symbolic links in HDF5 will have the - same semantics as symbolic links in Unix. - -

      The symbol table graph contains "entries" for each name. An - entry contains the file address for the object header and - possibly certain messages cached from the object header. - -

The H5G package understands the notion of opening an object, which means that given the name of the object, a handle to the object is returned (this isn't an API function). Objects can be opened multiple times simultaneously through the same name or, if the object has hard links, through other names. The name of an object cannot be removed from a group if the object is opened through that group (although the name can change within the group).

      Below the API, object attributes can be read without opening - the object; object attributes cannot change without first - opening that object. The one exception is that the contents of a - group can change without opening the group. - -


      -

      Building a hierarchy from a flat namespace

      - -

      Assuming we have a flat name space (that is, the root object is - a group which contains names for all other objects in the file - and none of those objects are groups), then we can build a - hierarchy of groups that also refer to the objects. - -

      The file initially contains `foo' `bar' `baz' in the root - group. We wish to add groups `grp1' and `grp2' so that `grp1' - contains objects `foo' and `baz' and `grp2' contains objects - `bar' and `baz' (so `baz' appears in both groups). - -

      In either case below, one might want to move the flat objects - into some other group (like `flat') so their names don't - interfere with the rest of the hierarchy (or move the hierarchy - into a directory called `/hierarchy'). - -

      with symbolic links

      - -

      Create group `grp1' and add symbolic links called `foo' whose - value is `/foo' and `baz' whose value is `/baz'. Similarly for - `grp2'. - -

      Accessing `grp1/foo' involves searching the root group for - the name `grp1', then searching that group for `foo', then - searching the root directory for `foo'. Alternatively, one - could change working groups to the grp1 group and then ask for - `foo' which searches `grp1' for the name `foo', then searches - the root group for the name `foo'. - -

      Deleting `/grp1/foo' deletes the symbolic link without - affecting the `/foo' object. Deleting `/foo' leaves the - `/grp1/foo' link dangling. - -

      with hard links

      - -

      Creating the hierarchy is the same as with symbolic links. - -

      Accessing `/grp1/foo' searches the root group for the name - `grp1', then searches that group for the name `foo'. If the - current working group is `/grp1' then we just search for the - name `foo'. - -

      Deleting `/grp1/foo' leaves `/foo' and vice versa. - -

      the code

      - -

      Depending on the eventual API... - -

      -H5Gcreate (file_id, "/grp1");
      -H5Glink (file_id, H5G_HARD, "/foo", "/grp1/foo");
      -    
      - - or - -
      -group_id = H5Gcreate (root_id, "grp1");
      -H5Glink (file_id, H5G_HARD, root_id, "foo", group_id, "foo");
      -H5Gclose (group_id);
      -    
      - - -
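In the API that eventually shipped, the same hierarchy is built with H5Gcreate2 and H5Lcreate_hard / H5Lcreate_soft; a sketch, assuming an already-open file_id:

    hid_t group_id;

    /* Create /grp1 with default link-creation, group-creation and access properties */
    group_id = H5Gcreate2(file_id, "/grp1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Gclose(group_id);

    /* Hard link: give the existing object /foo a second name, /grp1/foo */
    H5Lcreate_hard(file_id, "/foo", file_id, "/grp1/foo",
                   H5P_DEFAULT, H5P_DEFAULT);

    /* Symbolic (soft) link: /grp1/baz simply stores the path "/baz" */
    H5Lcreate_soft("/baz", file_id, "/grp1/baz", H5P_DEFAULT, H5P_DEFAULT);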
      -

      Building a flat namespace from a hierarchy

      - -

Similar to above, but in this case we have to watch out that we don't get two names which are the same: what happens to `/grp1/baz' and `/grp2/baz'? If they really refer to the same object then we just have `/baz', but if they point to two different objects, what happens?

The other thing to watch out for is cycles in the graph when we traverse it to build the flat namespace.


      -

      Listing the Group Contents

      - -

      Two things to watch out for are that the group contents don't - appear to change in a manner which would confuse the - application, and that listing everything in a group is as - efficient as possible. - -

      Method A

      - -

      Query the number of things in a group and then query each item - by index. A trivial implementation would be O(n*n) and wouldn't - protect the caller from changes to the directory which move - entries around and therefore change their indices. - -

      -n = H5GgetNumContents (group_id);
      -for (i=0; i<n; i++) {
      -   H5GgetNameByIndex (group_id, i, ...); /*don't worry about args yet*/
      -}
      -    
      - -

      Method B

      - -

The API contains a single function that reads all information from the specified group and returns that info through an array. The caller is responsible for freeing the array allocated by the query and the things to which it points. This also makes it clear that the returned value is a snapshot of the group which doesn't change if the group is modified.

      -n = H5Glist (file_id, "/grp1", info, ...);
      -for (i=0; i<n; i++) {
      -   printf ("name = %s\n", info[i].name);
      -   free (info[i].name); /*and maybe other fields too?*/
      -}
      -free (info);
      -    
      - - Notice that it would be difficult to expand the info struct since - its definition is part of the API. - -

      Method C

      - -

The caller asks for a snapshot of the group and then accesses items in the snapshot through various query-by-index API functions. When finished, the caller notifies the library that it's done with the snapshot. The word "snapshot" makes it clear that subsequent changes to the directory will not be reflected in the snapshot_id.

      -snapshot_id = H5Gsnapshot (group_id); /*or perhaps group_name */
      -n = H5GgetNumContents (snapshot_id);
      -for (i=0; i<n; i++) {
      -   H5GgetNameByIndex (snapshot_id, i, ...);
      -}
      -H5Grelease (snapshot_id);
      -    
      - - In fact, we could allow the user to leave off the H5Gsnapshot and - H5Grelease and use group_id in the H5GgetNumContents and - H5GgetNameByIndex so they can choose between Method A and Method - C. - -
      -

      An implementation of Method C

      - -
      -
hid_t H5Gsnapshot (hid_t group_id)
Opens every object in the specified group and stores the handles in an array managed by the library (a linear-time operation). Open object handles are essentially symbol table entries with a little extra info (symbol table entries cache certain things about the object which are also found in the object header). Because the objects are open (A) they cannot be removed from the group, (B) querying the object returns the latest info even if something else has that object open, (C) if the object is renamed within the group then its name from H5GgetNameByIndex is changed. Adding new entries to a group doesn't affect the snapshot.
char *H5GgetNameByIndex (hid_t snapshot_id, int index)
      Uses the open object handle from entry index of - the snapshot array to get the object name. This is a - constant-time operation. The name is updated automatically if - the object is renamed within the group. - -
      H5Gget<whatever>ByIndex...() -
      Uses the open object handle from entry index, - which is just a symbol table entry, and reads the appropriate - object header message(s) which might be cached in the symbol - table entry. This is a constant-time operation if cached, - linear in the number of messages if not cached. - -
      H5Grelease (hid_t snapshot_id) -
Closes each object referred to by the snapshot and then frees the snapshot array. This is a linear-time operation.
      - -
      -

      To return char* or some HDF5 string type.

      - -

      In either case, the caller has to release resources associated - with the return value, calling free() or some HDF5 function. - -

      Names in the current implementation of the H5G package don't - contain embedded null characters and are always null terminated. - -

Eventually the caller probably wants a char* so it can pass it to some non-HDF5 function; does that require strdup'ing the string again? Then the caller has to free() the char* and release the HDF5 string.


      -
      Robb Matzke
      - - -Last modified: Fri Oct 3 09:32:10 EST 1997 - - - diff --git a/doc/html/storage.html b/doc/html/storage.html deleted file mode 100644 index 87ea54d..0000000 --- a/doc/html/storage.html +++ /dev/null @@ -1,274 +0,0 @@ - - - - Raw Data Storage in HDF5 - - - -

      Raw Data Storage in HDF5

      - -

      This document describes the various ways that raw data is - stored in an HDF5 file and the object header messages which - contain the parameters for the storage. - -

      Raw data storage has three components: the mapping from some - logical multi-dimensional element space to the linear address - space of a file, compression of the raw data on disk, and - striping of raw data across multiple files. These components - are orthogonal. - -

Some goals of the storage mechanism are to be able to efficiently store data which is:

      -
      Small -
      Small pieces of raw data can be treated as meta data and - stored in the object header. This will be achieved by storing - the raw data in the object header with message 0x0006. - Compression and striping are not supported in this case. - -
      Complete Large -
      The library should be able to store large arrays - contiguously in the file provided the user knows the final - array size a priori. The array can then be read/written in a - single I/O request. This is accomplished by describing the - storage with object header message 0x0005. Compression and - striping are not supported in this case. - -
      Sparse Large -
      A large sparse raw data array should be stored in a manner - that is space-efficient but one in which any element can still - be accessed in a reasonable amount of time. Implementation - details are below. - -
      Dynamic Size -
      One often doesn't have prior knowledge of the size of an - array. It would be nice to allow arrays to grow dynamically in - any dimension. It might also be nice to allow the array to - grow in the negative dimension directions if convenient to - implement. Implementation details are below. - -
      Subslab Access -
Some multi-dimensional arrays are almost always accessed by subslabs. For instance, a 2-d array of pixels might always be accessed as smaller 1k-by-1k 2-d arrays always aligned on 1k index values. We should be able to store the array in such a way that striding through the entire array is not necessary. Subslab access might also be useful with compression algorithms where each storage slab can be compressed independently of the others. Implementation details are below.
      Compressed -
      Various compression algorithms can be applied to the entire - array. We're not planning to support separate algorithms (or a - single algorithm with separate parameters) for each chunk - although it would be possible to implement that in a manner - similar to the way striping across files is - implemented. - -
      Striped Across Files -
      The array access functions should support arrays stored - discontiguously across a set of files. -
      - -

      Implementation of Indexed Storage

      - -

      The Sparse Large, Dynamic Size, and Subslab Access methods - share so much code that they can be described with a single - message. The new Indexed Storage Message (0x0008) - will replace the old Chunked Object (0x0009) and - Sparse Object (0x000A) Messages. - -

      -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      - The Format of the Indexed Storage Message -
byte | byte | byte | byte

      Address of B-tree

Number of Dimensions | Reserved | Reserved | Reserved
      Reserved (4 bytes)
      Alignment for Dimension 0 (4 bytes)
      Alignment for Dimension 1 (4 bytes)
      ...
      Alignment for Dimension N (4 bytes)
      -
      - -

      The alignment fields indicate the alignment in logical space to - use when allocating new storage areas on disk. For instance, - writing every other element of a 100-element one-dimensional - array (using one HDF5 I/O partial write operation per element) - that has unit storage alignment would result in 50 - single-element, discontiguous storage segments. However, using - an alignment of 25 would result in only four discontiguous - segments. The size of the message varies with the number of - dimensions. - -
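A small sketch of the arithmetic behind those segment counts; it just counts how many distinct alignment-sized regions the written indices touch (the function is illustrative, not library code):

    /* Count the discontiguous storage segments produced by writing elements
     * 0, 2, 4, ... 98 of a 100-element array, one element per write, when
     * storage is allocated in `alignment'-sized regions of logical space. */
    static int count_segments(int alignment)
    {
        int i, segments = 0, last_region = -1;

        for (i = 0; i < 100; i += 2) {
            int region = i / alignment;          /* region that holds element i */
            if (region != last_region) {
                segments++;
                last_region = region;
            }
        }
        return segments;    /* alignment 1 gives 50; alignment 25 gives 4 */
    }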

      A B-tree is used to point to the discontiguous portions of - storage which has been allocated for the object. All keys of a - particular B-tree are the same size and are a function of the - number of dimensions. It is therefore not possible to change the - dimensionality of an indexed storage array after its B-tree is - created. - -

      -

      - - - - - - - - - - - - - - - - - - - - - - - - -
      - The Format of a B-Tree Key -
byte | byte | byte | byte
      External File Number or Zero (4 bytes)
      Chunk Offset in Dimension 0 (4 bytes)
      Chunk Offset in Dimension 1 (4 bytes)
      ...
      Chunk Offset in Dimension N (4 bytes)
      -
      - -

      The keys within a B-tree obey an ordering based on the chunk - offsets. If the offsets in dimension-0 are equal, then - dimension-1 is used, etc. The External File Number field - contains a 1-origin offset into the External File List message - which contains the name of the external file in which that chunk - is stored. - -
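A sketch of that key ordering as a comparison function; the struct and field names here are illustrative only, not the library's actual types:

    /* Hypothetical key type and comparison; the real library's names differ. */
    typedef struct {
        unsigned file_number;    /* external file number, or zero   */
        unsigned offset[8];      /* chunk offset in each dimension  */
    } istore_key_t;

    static int key_cmp(const istore_key_t *a, const istore_key_t *b, int ndims)
    {
        int d;
        for (d = 0; d < ndims; d++) {            /* dimension 0 first, then 1, ... */
            if (a->offset[d] < b->offset[d]) return -1;
            if (a->offset[d] > b->offset[d]) return 1;
        }
        return 0;                                /* all chunk offsets equal */
    }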

      Implementation of Striping

      - -

      The indexed storage will support arbitrary striping at the - chunk level; each chunk can be stored in any file. This is - accomplished by using the External File Number field of an - indexed storage B-tree key as a 1-origin offset into an External - File List Message (0x0009) which takes the form: - -

      -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      - The Format of the External File List Message -
byte | byte | byte | byte

      Name Heap Address

      Number of Slots Allocated (4 bytes)
      Number of File Names (4 bytes)
      Byte Offset of Name 1 in Heap (4 bytes)
      Byte Offset of Name 2 in Heap (4 bytes)
      ...

      Unused Slot(s)

      -
      - -

Each indexed storage array that has all or part of its data stored in external files will contain a single external file list message. The size of the message is determined when the message is created, but it may be possible to enlarge the message on demand by moving it. At this time, it's not possible for multiple arrays to share a single external file list message.

      -
      - H5O_efl_t *H5O_efl_new (H5G_entry_t *object, intn - nslots_hint, intn heap_size_hint) - -
      Adds a new, empty external file list message to an object - header and returns a pointer to that message. The message - acts as a cache for file descriptors of external files that - are open. - -

      - intn H5O_efl_index (H5O_efl_t *efl, const char *filename) - -
      Gets the external file index number for a particular file name. - If the name isn't in the external file list then it's added to - the H5O_efl_t struct and immediately written to the object - header to which the external file list message belongs. Name - comparison is textual. Each name should be relative to the - directory which contains the HDF5 file. - -

      - H5F_low_t *H5O_efl_open (H5O_efl_t *efl, intn index, uintn mode) - -
      Gets a low-level file descriptor for an external file. The - external file list caches file descriptors because we might - have many more external files than there are file descriptors - available to this process. The caller should not close this file. - -

      - herr_t H5O_efl_release (H5O_efl_t *efl) - -
      Releases an external file list, closes all files - associated with that list, and if the list has been modified - since the call to H5O_efl_new flushes the message - to disk. -
      - -
      -
      Robb Matzke
      - - -Last modified: Tue Nov 25 12:36:50 EST 1997 - - - diff --git a/doc/html/symtab b/doc/html/symtab deleted file mode 100644 index a657729..0000000 --- a/doc/html/symtab +++ /dev/null @@ -1,313 +0,0 @@ -A number of issues involving caching of object header messages in -symbol table entries must be resolved. - -What is the motivation for these changes? - - If we make objects completely independent of object name it allows - us to refer to one object by multiple names (a concept called hard - links in Unix file systems), which in turn provides an easy way to - share data between datasets. - - Every object in an HDF5 file has a unique, constant object header - address which serves as a handle (or OID) for the object. The - object header contains messages which describe the object. - - HDF5 allows some of the object header messages to be cached in - symbol table entries so that the object header doesn't have to be - read from disk. For instance, an entry for a directory caches the - directory disk addresses required to access that directory, so the - object header for that directory is seldom read. - - If an object has multiple names (that is, a link count greater than - one), then it has multiple symbol table entries which point to it. - All symbol table entries must agree on header messages. The - current mechanism is to turn off the caching of header messages in - symbol table entries when the header link count is more than one, - and to allow caching once the link count returns to one. - - However, in the current implementation, a package is allowed to - copy a symbol table entry and use it as a private cache for the - object header. This doesn't work for a number of reasons (all but - one require a `delete symbol entry' operation). - - 1. If two packages hold copies of the same symbol table entry, - they don't notify each other of changes to the symbol table - entry. Eventually, one package reads a cached message and - gets the wrong value because the other package changed the - message in the object header. - - 2. If one package holds a copy of the symbol table entry and - some other part of HDF5 removes the object and replaces it - with some other object, then the original package will - continue to access the non-existent object using the new - object header. - - 3. If one package holds a copy of the symbol table entry and - some other part of HDF5 (re)moves the directory which - contains the object, then the package will be unable to - update the symbol table entry with the new cached - data. Packages that refer to the object by the new name will - use old cached data. - - -The basic problem is that there may be multiple copies of the object -symbol table entry floating around in the code when there should -really be at most one per hard link. - - Level 0: A copy may exist on disk as part of a symbol table node, which - is a small 1d array of symbol table entries. - - Level 1: A copy may be cached in memory as part of a symbol table node - in the H5Gnode.c file by the H5AC layer. - - Level 2a: Another package may be holding a copy so it can perform - fast lookup of any header messages that might be cached in - the symbol table entry. It can't point directly to the - cached symbol table node because that node can dissappear - at any time. - - Level 2b: Packages may hold more than one copy of a symbol table - entry. For instance, if H5D_open() is called twice for - the same name, then two copies of the symbol table entry - for the dataset exist in the H5D package. 
- -How can level 2a and 2b be combined? - - If package data structures contained pointers to symbol table - entries instead of copies of symbol table entries and if H5G - allocated one symbol table entry per hard link, then it's trivial - for Level 2a and 2b to benefit from one another's actions since - they share the same cache. - -How does this work conceptually? - - Level 2a and 2b must notify Level 1 of their intent to use (or stop - using) a symbol table entry to access an object header. The - notification of the intent to access an object header is called - `opening' the object and releasing the access is `closing' the - object. - - Opening an object requires an object name which is used to locate - the symbol table entry to use for caching of object header - messages. The return value is a handle for the object. Figure 1 - shows the state after Dataset1 opens Object with a name that maps - through Entry1. The open request created a copy of Entry1 called - Shadow1 which exists even if SymNode1 is preempted from the H5AC - layer. - - ______ - Object / \ - SymNode1 +--------+ | - +--------+ _____\ | Header | | - | | / / +--------+ | - +--------+ +---------+ \______/ - | Entry1 | | Shadow1 | /____ - +--------+ +---------+ \ \ - : : \ - +--------+ +----------+ - | Dataset1 | - +----------+ - FIGURE 1 - - - - The SymNode1 can appear and disappear from the H5AC layer at any - time without affecting the Object Header data cached in the Shadow. - The rules are: - - * If the SymNode1 is present and is about to disappear and the - Shadow1 dirty bit is set, then Shadow1 is copied over Entry1, the - Entry1 dirty bit is set, and the Shadow1 dirty bit is cleared. - - * If something requests a copy of Entry1 (for a read-only peek - request), and Shadow1 exists, then a copy (not pointer) of Shadow1 - is returned instead. - - * Entry1 cannot be deleted while Shadow1 exists. - - * Entry1 cannot change directly if Shadow1 exists since this means - that some other package has opened the object and may be modifying - it. I haven't decided if it's useful to ever change Entry1 - directly (except of course within the H5G layer itself). - - * Shadow1 is created when Dataset1 `opens' the object through - Entry1. Dataset1 is given a pointer to Shadow1 and Shadow1's - reference count is incremented. - - * When Dataset1 `closes' the Object the Shadow1 reference count is - decremented. When the reference count reaches zero, if the - Shadow1 dirty bit is set, then Shadow1's contents are copied to - Entry1, and the Entry1 dirty bit is set. Shadow1 is then deleted - if its reference count is zero. This may require reading SymNode1 - back into the H5AC layer. - -What happens when another Dataset opens the Object through Entry1? - - If the current state is represented by the top part of Figure 2, - then Dataset2 will be given a pointer to Shadow1 and the Shadow1 - reference count will be incremented to two. The Object header link - count remains at one so Object Header messages continue to be cached - by Shadow1. Dataset1 and Dataset2 benefit from one another - actions. The resulting state is represented by Figure 2. 
- - _____ - SymNode1 Object / \ - +--------+ _____\ +--------+ | - | | / / | Header | | - +--------+ +---------+ +--------+ | - | Entry1 | | Shadow1 | /____ \_____/ - +--------+ +---------+ \ \ - : : _ \ - +--------+ |\ +----------+ - \ | Dataset1 | - \________ +----------+ - \ \ - +----------+ | - | Dataset2 | |- New Dataset - +----------+ | - / - FIGURE 2 - - -What happens when the link count for Object increases while Dataset -has the Object open? - - SymNode2 - +--------+ - SymNode1 Object | | - +--------+ ____\ +--------+ /______ +--------+ - | | / / | header | \ `| Entry2 | - +--------+ +---------+ +--------+ +--------+ - | Entry1 | | Shadow1 | /____ : : - +--------+ +---------+ \ \ +--------+ - : : \ - +--------+ +----------+ \________________/ - | Dataset1 | | - +----------+ New Link - - FIGURE 3 - - The current state is represented by the left part of Figure 3. To - create a new link the Object Header had to be located by traversing - through Entry1/Shadow1. On the way through, the Entry1/Shadow1 - cache is invalidated and the Object Header link count is - incremented. Entry2 is then added to SymNode2. - - Since the Object Header link count is greater than one, Object - header data will not be cached in Entry1/Shadow1. - - If the initial state had been all of Figure 3 and a third link is - being added and Object is open by Entry1 and Entry2, then creation - of the third link will invalidate the cache in Entry1 or Entry2. It - doesn't matter which since both caches are already invalidated - anyway. - -What happens if another Dataset opens the same object by another name? - - If the current state is represented by Figure 3, then a Shadow2 is - created and associated with Entry2. However, since the Object - Header link count is more than one, nothing gets cached in Shadow2 - (or Shadow1). - -What happens if the link count decreases? - - If the current state is represented by all of Figure 3 then it isn't - possible to delete Entry1 because the object is currently open - through that entry. Therefore, the link count must have - decreased because Entry2 was removed. - - As Dataset1 reads/writes messages in the Object header they will - begin to be cached in Shadow1 again because the Object header link - count is one. - -What happens if the object is removed while it's open? - - That operation is not allowed. - -What happens if the directory containing the object is deleted? - - That operation is not allowed since deleting the directory requires - that the directory be empty. The directory cannot be emptied - because the open object cannot be removed from the directory. - -What happens if the object is moved? - - Moving an object is a process consisting of creating a new - hard-link with the new name and then deleting the old name. - This will fail if the object is open. - -What happens if the directory containing the entry is moved? - - The entry and the shadow still exist and are associated with one - another. - -What if a file is flushed or closed when objects are open? - - Flushing a symbol table with open objects writes correct information - to the file since Shadow is copied to Entry before the table is - flushed. - - Closing a file with open objects will create a valid file but will - return failure. - -How is the Shadow associated with the Entry? - - A symbol table is composed of one or more symbol nodes. A node is a - small 1-d array of symbol table entries. 
The entries can move - around within a node and from node-to-node as entries are added or - removed from the symbol table and nodes can move around within a - symbol table, being created and destroyed as necessary. - - Since a symbol table has an object header with a unique and constant - file offset, and since H5G contains code to efficiently locate a - symbol table entry given it's name, we use these two values as a key - within a shadow to associate the shadow with the symbol table - entry. - - struct H5G_shadow_t { - haddr_t stab_addr; /*symbol table header address*/ - char *name; /*entry name wrt symbol table*/ - hbool_t dirty; /*out-of-date wrt stab entry?*/ - H5G_entry_t ent; /*my copy of stab entry */ - H5G_entry_t *main; /*the level 1 entry or null */ - H5G_shadow_t *next, *prev; /*other shadows for this stab*/ - }; - - The set of shadows will be organized in a hash table of linked - lists. Each linked list will contain the shadows associated with a - particular symbol table header address and the list will be sorted - lexicographically. - - Also, each Entry will have a pointer to the corresponding Shadow or - null if there is no shadow. - - When a symbol table node is loaded into the main cache, we look up - the linked list of shadows in the shadow hash table based on the - address of the symbol table object header. We then traverse that - list matching shadows with symbol table entries. - - We assume that opening/closing objects will be a relatively - infrequent event compared with loading/flushing symbol table - nodes. Therefore, if we keep the linked list of shadows sorted it - costs O(N) to open and close objects where N is the number of open - objects in that symbol table (instead of O(1)) but it costs only - O(N) to load a symbol table node (instead of O(N^2)). - -What about the root symbol entry? - - Level 1 storage for the root symbol entry is always available since - it's stored in the hdf5_file_t struct instead of a symbol table - node. However, the contents of that entry can move from the file - handle to a symbol table node by H5G_mkroot(). Therefore, if the - root object is opened, we keep a shadow entry for it whose - `stab_addr' field is zero and whose `name' is null. - - For this reason, the root object should always be read through the - H5G interface. - -One more key invariant: The H5O_STAB message in a symbol table header -never changes. This allows symbol table entries to cache the H5O_STAB -message for the symbol table to which it points without worrying about -whether the cache will ever be invalidated. - - diff --git a/doc/html/version.gif b/doc/html/version.gif deleted file mode 100644 index 41d4401..0000000 Binary files a/doc/html/version.gif and /dev/null differ diff --git a/doc/html/version.obj b/doc/html/version.obj deleted file mode 100644 index 96b5b7f..0000000 --- a/doc/html/version.obj +++ /dev/null @@ -1,96 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,0,0,8,1,9,1,1,0,2,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -poly('black',2,[ - 128,128,128,448],0,3,1,0,0,0,0,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 128,128,128,64],0,3,1,1,0,0,2,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 128,448,128,512],0,3,1,4,0,0,2,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -text('black',144,112,'Courier',0,17,1,0,0,1,42,14,22,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.30"]). 
-text('black',144,144,'Courier',0,17,1,0,0,1,42,14,30,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.31"]). -text('black',144,176,'Courier',0,17,1,0,0,1,42,14,32,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.32"]). -poly('black',2,[ - 256,208,256,448],0,3,1,34,0,0,0,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 256,448,256,512],0,3,1,36,0,0,2,0,12,5,0,0,0,'3','12','5', - "0",[ -]). -poly('black',2,[ - 128,192,256,208],1,1,1,37,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',144,224,'Courier',0,17,1,0,0,1,42,14,41,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.33"]). -text('black',144,256,'Courier',0,17,1,0,0,1,42,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.34"]). -text('black',272,224,'Courier',0,17,1,0,0,1,35,14,45,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.0"]). -text('black',272,256,'Courier',0,17,1,0,0,1,35,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.1"]). -text('black',272,288,'Courier',0,17,1,0,0,1,35,14,49,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.2"]). -text('black',272,320,'Courier',0,17,1,0,0,1,35,14,51,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.5.3"]). -text('black',144,288,'Courier',0,17,1,0,0,1,42,14,53,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.3.35"]). -text('black',144,320,'Courier',0,17,1,0,0,1,35,14,57,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.4.0"]). -text('black',144,368,'Courier',0,17,1,0,0,1,35,14,59,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.4.1"]). -text('black',272,192,'Helvetica',0,17,1,0,0,1,144,15,67,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "New development branch"]). -text('black',144,64,'Helvetica',0,17,1,0,0,1,163,15,69,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Original development branch"]). -text('black',16,208,'Helvetica',0,17,2,0,0,1,87,30,71,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Feature Freeze", - "at this point."]). -text('black',16,320,'Helvetica',0,17,2,0,0,1,84,30,73,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Public Release", - "at this point."]). -poly('black',2,[ - 104,208,128,208],1,1,1,77,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 104,320,128,320],1,1,1,78,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 256,336,128,352],1,1,1,79,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -text('black',320,368,'Helvetica',0,17,3,0,0,1,137,45,82,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Merge a bug fix from the", - "development branch to", - "the release branch."]). -box('black',312,368,464,416,0,1,1,87,0,0,0,0,0,'1',[ -]). -poly('black',4,[ - 312,392,240,384,296,344,232,344],1,1,1,90,1,0,0,0,8,3,0,0,0,'1','8','3', - "6",[ -]). -box('black',8,208,104,240,0,1,1,95,0,0,0,0,0,'1',[ -]). -box('black',8,320,104,352,0,1,1,98,0,0,0,0,0,'1',[ -]). -text('black',144,408,'Courier',0,17,1,0,0,1,35,14,102,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1.4.2"]). -box('black',0,40,480,528,0,1,1,104,0,0,0,0,0,'1',[ -]). diff --git a/doc/src/Copyright.html b/doc/src/Copyright.html deleted file mode 100644 index f295867..0000000 --- a/doc/src/Copyright.html +++ /dev/null @@ -1,76 +0,0 @@ - - -HDF5 Copyright Notice - - - - - -
      - -

      Copyright Notice and Statement for -
      -NCSA Hierarchical Data Format (HDF) Software Library and Utilities

      -
      -

      - - -NCSA Hierarchical Data Format (HDF) Software Library and Utilities -
      -Copyright 1998 the Board of Trustees of the University of Illinois -
      -All rights reserved. -

      - -Contributors: National Center for Supercomputing Applications (NCSA) at -the University of Illinois, Lawrence Livermore Nat'l Laboratory (LLNL), -Sandia National Laboratories (SNL), Los Alamos National Laboratory (LANL), -Jean-loup Gailly and Mark Adler (gzip library) -

      - -Redistribution and use in source and binary forms, with or without -modification, are permitted for any purpose (including commercial purposes) -provided that the following conditions are met: -

      - -

        -
      1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -
      2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or materials provided with the distribution. - -
      3. In addition, redistributions of modified forms of the source or binary -code must carry prominent notices stating that the original code was -changed and the date of the change. - -
      4. All publications or advertising materials mentioning features or use of -this software must acknowledge that it was developed by the National Center -for Supercomputing Applications at the University of Illinois, and credit -the Contributors. - -
      5. Neither the name of the University nor the names of the Contributors may -be used to endorse or promote products derived from this software without -specific prior written permission from the University or the Contributors. - -
      6. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND THE CONTRIBUTORS "AS IS" -WITH NO WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED. In no event -shall the University or the Contributors be liable for any damages suffered -by the users arising out of the use of this software, even if advised of -the possibility of such damage. - -
      - - -
      - -
      -HDF Help Desk - -
      -Last modified: 8 September 1998 - - - - diff --git a/doc/src/Glossary.html b/doc/src/Glossary.html deleted file mode 100755 index 5b04f9c..0000000 --- a/doc/src/Glossary.html +++ /dev/null @@ -1,109 +0,0 @@ - -HDF5 Draft Glossary - - - - -
      -
      -HDF5 Reference Manual   -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Glossary -
      -
      - -
      -

      HDF5 Glossary

      -
      - -(Under construction! - This is the bare beginning of a Glossary to accompany the HDF5 - documentation; it is by no means complete.) - - - - -
      - -basic data types: -
        -
      • (Some data types may change substantially en route to - Release 1.2.) -
      • char - 8-bit character (only for ASCII information) -
      • int8 - 8-bit signed integer -
      • uint8 - 8-bit unsigned integer -
      • int16 - 16-bit signed integer -
      • uint16 - 16-bit unsigned integer -
      • int32 - 32-bit signed integer -
      • uint32 - 32-bit unsigned integer -
      • intn - "native" signed integer -
      • uintn - "native" unsigned integer -
      • int64 - 64-bit signed integer (new) -
      • uint64 - 64-bit unsigned integer (new) -
      • float32 - 32-bit IEEE float -
      • float64 - 64-bit IEEE float -
      - -Complex data types: -
        -
      • (Some data types may change substantially en route to - Release 1.2.) -
      • hid_t - 32-bit unsigned integer used as ID for memory objects -
      • hoid_t - 32-bit unsigned integer (currently) used as ID for disk-based - objects -
      • hbool_t - boolean to indicate true/false/error codes from functions -
      • herr_t - 32-bit integer to indicate succeed/fail codes from functions -
      - -disk I/O data types: -
        -
      • (Some data types may change substantially en route to - Release 1.2.) -
      • hoff_t - (64-bit?) offset on disk in bytes -
      • hlen_t - (64-bit?) length on disk in bytes -
      - -
      -
      -HDF5 Reference Manual   -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Glossary -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 14 July 1998 - - - diff --git a/doc/src/H5.intro.doc b/doc/src/H5.intro.doc deleted file mode 100755 index 6dc7214..0000000 Binary files a/doc/src/H5.intro.doc and /dev/null differ diff --git a/doc/src/RM_H5.html b/doc/src/RM_H5.html deleted file mode 100755 index ad2cb8d..0000000 --- a/doc/src/RM_H5.html +++ /dev/null @@ -1,223 +0,0 @@ - - -HDF5/H5 Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5: General Library Functions

      -
- -These functions serve general-purpose needs of the HDF5 library -and its users. - - -
      - -       - -       - -
      - - -
      -
      -
      Name: H5open -
      Signature: -
      herr_t H5open(void) -
      Purpose: -
Initializes the HDF5 library. -
      Description: -
H5open initializes the library. This function is - normally called automatically, but if you find that an - HDF5 library function is failing inexplicably, try calling - this function first. -
      Parameters: -
      -
      None. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5close -
      Signature: -
      herr_t H5close(void) -
      Purpose: -
      Flushes all data to disk, closes file identifiers, and cleans up memory. -
      Description: -
H5close flushes all data to disk, - closes all file identifiers, and cleans up all memory used by - the library. This function is generally called when the - application calls exit, but may be called earlier - in the event of an emergency shutdown or out of a desire to free all - resources used by the HDF5 library. -
      Parameters: -
      -
      None. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
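Example (editorial sketch, not part of the original page): explicit startup and shutdown using the two calls above. Error handling is minimal and the omitted middle section is a placeholder.

    #include "hdf5.h"

    int main(void)
    {
        if (H5open() < 0)        /* normally implicit; shown explicitly here */
            return 1;

        /* ... open files, create datasets, and so on ... */

        H5close();               /* flush data, close identifiers, free library memory */
        return 0;
    }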
      -
      -
      Name: H5dont_atexit -
      Signature: -
      herr_t H5dont_atexit(void) -
      Purpose: -
      Instructs library not to install atexit cleanup routine. -
      Description: -
      H5dont_atexit indicates to the library that an - atexit() cleanup routine should not be installed. - The major purpose for this is in situations where the - library is dynamically linked into an application and is - un-linked from the application before exit() gets - called. In those situations, a routine installed with - atexit() would jump to a routine which was - no longer in memory, causing errors. -

- In order to be effective, this routine must be called - before any other HDF5 function calls, and must be called each - time the library is loaded/linked into the application - (the first time and after it has been unloaded). -

      Parameters: -
      -
      None. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5get_libversion -
      Signature: -
      herr_t H5get_libversion(unsigned *majnum, - unsigned *minnum, - unsigned *relnum - ) -
      Purpose: -
Returns the version numbers of the HDF5 library in use. -
      Description: -
H5get_libversion retrieves the major, minor, and release - numbers of the version of the HDF5 library that is linked to - the application. -
      Parameters: -
      -
      unsigned *majnum -
      The major version of the library. -
      unsigned *minnum -
      The minor version of the library. -
      unsigned *relnum -
      The release number of the library. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
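Example (editorial sketch): printing the version of the library the application is linked against; printf and the message text are illustrative only.

    #include <stdio.h>
    #include "hdf5.h"

    int main(void)
    {
        unsigned maj, min, rel;

        if (H5get_libversion(&maj, &min, &rel) < 0)
            return 1;
        printf("Linked against HDF5 %u.%u.%u\n", maj, min, rel);
        return 0;
    }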
      -
      -
      Name: H5check_version -
      Signature: -
      herr_t H5check_version(unsigned majnum, - unsigned minnum, - unsigned relnum - ) -
      Purpose: -
Verifies that the version numbers compiled into the application match the version of the HDF5 library. -
      Description: -
H5check_version verifies that the arguments match the - version numbers compiled into the library. This function is intended - to be called by the user to verify that the versions of the header files - compiled into the application match the version of the HDF5 library. -

      - Due to the risks of data corruption or segmentation faults, - H5check_version causes the application to abort if the - version numbers do not match. -

      Parameters: -
      -
unsigned majnum -
The major version of the library. -
unsigned minnum -
The minor version of the library. -
unsigned relnum -
The release number of the library. -
      -
      Returns: -
      Returns SUCCEED (0) if successful. - Upon failure, this function causes the application to abort. -
      - - -
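Example (editorial sketch): a typical call comparing header and library versions. The H5_VERS_MAJOR, H5_VERS_MINOR, and H5_VERS_RELEASE macros are assumed to be defined in the public header; they are not documented on this page.

    #include "hdf5.h"

    int main(void)
    {
        /* Aborts the application if the header and library versions disagree. */
        H5check_version(H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE);
        return 0;
    }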
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 2 September 1998 - - - diff --git a/doc/src/RM_H5A.html b/doc/src/RM_H5A.html deleted file mode 100644 index bda4333..0000000 --- a/doc/src/RM_H5A.html +++ /dev/null @@ -1,523 +0,0 @@ - - -HDF5/H5A Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5A: Attribute Interface

      -
      - -

      Attribute API Functions

      - -These functions create and manipulate attributes -and information about attributes. - - - -
      - -       - -       - -
      - -

      -The Attribute interface, H5A, is primarily designed to easily allow -small datasets to be attached to primary datasets as metadata information. -Additional goals for the H5A interface include keeping storage requirement -for each attribute to a minimum and easily sharing attributes among -datasets. -

-Because attributes are intended to be small objects, large datasets -intended as additional information for a primary dataset should be -stored as supplemental datasets in a group with the primary dataset. -Attributes can then be attached to the group containing everything -to indicate that a particular type of dataset with supplemental datasets -is located in the group. How small is "small" is not defined by the -library and is up to the user's interpretation. -

      -See Attributes in the -HDF5 User's Guide for further information. - -


      -
      -
      Name: H5Acreate -
      Signature: -
      hid_t H5Acreate(hid_t loc_id, - const char *name, - hid_t type_id, - hid_t space_id, - hid_t create_plist - ) -
      Purpose: -
      Creates a dataset as an attribute of another group, dataset, - or named datatype. -
      Description: -
H5Acreate creates an attribute which is attached - to the object specified with loc_id. - loc_id is an identifier of a group, dataset, - or named datatype. The name specified with name - must be unique for the object to which the attribute is attached. - The attribute's datatype and dataspace identifiers, - type_id and space_id, - are created with the H5T and H5S interfaces, respectively. - Currently only simple dataspaces are allowed for attribute - dataspaces. The create_plist_id property list - is currently unused, but will be used in the future for optional - properties of attributes. The attribute identifier returned from - this function must be released with H5Aclose or - resource leaks will develop. Attempting to create an attribute - with the same name as an already existing attribute will fail, - leaving the pre-existing attribute in place. -
      Parameters: -
      -
      hid_t loc_id -
      IN: Object (dataset, group, or named datatype) to be attached to. -
      const char *name -
      IN: Name of attribute to create. -
      hid_t type_id -
      IN: Identifier of datatype for attribute. -
      hid_t space_id -
      IN: Identifier of dataspace for attribute. -
      hid_t create_plist -
      IN: Identifier of creation property list (currently not used). -
      -
      Returns: -
      Returns an attribute identifier if successful; - otherwise FAIL (-1). -
      - - -
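Example (editorial sketch): creating and writing a scalar integer attribute on the root group, following the draft signatures on this page. The file name "example.h5" and attribute name "version" are placeholders; H5Screate_simple, H5Sclose, and H5T_NATIVE_INT come from the H5S and H5T interfaces and are assumed here.

    #include "hdf5.h"

    int add_attribute(void)
    {
        hsize_t dims  = 1;
        int     value = 42;
        hid_t   file, root, space, attr;

        file  = H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
        root  = H5Gopen(file, "/");
        space = H5Screate_simple(1, &dims, NULL);
        attr  = H5Acreate(root, "version", H5T_NATIVE_INT, space, H5P_DEFAULT);

        H5Awrite(attr, H5T_NATIVE_INT, &value);   /* write the whole attribute */

        H5Aclose(attr);
        H5Sclose(space);
        H5Gclose(root);
        H5Fclose(file);
        return 0;
    }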
      -
      -
      Name: H5Aopen_name -
      Signature: -
      hid_t H5Aopen_name(hid_t loc_id, - const char *name - ) -
      Purpose: -
      Opens an attribute specified by name. -
      Description: -
      H5Aopen_name opens an attribute specified by - its name, name, which is attached to the - object specified with loc_id. - The location object may be either a group, dataset, or - named datatype, which may have any sort of attribute. - The attribute identifier returned from this function must - be released with H5Aclose or resource leaks - will develop. -
      Parameters: -
      -
      hid_t loc_id -
IN: Identifier of the group, dataset, or named datatype that the - attribute is attached to. -
      const char *name -
      IN: Attribute name. -
      -
      Returns: -
      Returns attribute identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Aopen_idx -
      Signature: -
      hid_t H5Aopen_idx(hid_t loc_id, - unsigned int idx - ) -
      Purpose: -
      Opens the attribute specified by its index. -
      Description: -
      H5Aopen_idx opens an attribute which is attached - to the object specified with loc_id. - The location object may be either a group, dataset, or - named datatype, all of which may have any sort of attribute. - The attribute specified by the index, idx, - indicates the attribute to access. - The value of idx is a 0-based, non-negative integer. - The attribute identifier returned from this function must be - released with H5Aclose or resource leaks will develop. -
      Parameters: -
      -
      hid_t loc_id -
IN: Identifier of the group, dataset, or named datatype that the - attribute is attached to. -
      unsigned int idx -
      IN: Index of the attribute to open. -
      -
      Returns: -
      Returns attribute identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Awrite -
      Signature: -
      herr_t H5Awrite(hid_t attr_id, - hid_t mem_type_id, - void *buf - ) -
      Purpose: -
      Writes data to an attribute. -
      Description: -
      H5Awrite writes an attribute, specified with - attr_id. The attribute's memory datatype - is specified with mem_type_id. The entire - attribute is written from buf to the file. -

      - Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

      Parameters: -
      -
      hid_t attr_id -
      IN: Identifier of an attribute to write. -
      hid_t mem_type_id -
      IN: Identifier of the attribute datatype (in memory). -
      void *buf -
      IN: Data to be written. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Aread -
      Signature: -
      herr_t H5Aread(hid_t attr_id, - hid_t mem_type_id, - void *buf - ) -
      Purpose: -
      Reads an attribute. -
      Description: -
      H5Aread reads an attribute, specified with - attr_id. The attribute's memory datatype - is specified with mem_type_id. The entire - attribute is read into buf from the file. -

      - Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

      Parameters: -
      -
      hid_t attr_id -
      IN: Identifier of an attribute to read. -
      hid_t mem_type_id -
      IN: Identifier of the attribute datatype (in memory). -
      void *buf -
      IN: Buffer for data to be read. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
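Example (editorial sketch): opening an attribute by name and reading it into a native int; the attribute name "version" is a placeholder and H5T_NATIVE_INT is assumed from the H5T interface.

    #include "hdf5.h"

    int read_attribute(hid_t obj_id)
    {
        int   value = 0;
        hid_t attr  = H5Aopen_name(obj_id, "version");

        if (attr < 0)
            return -1;
        H5Aread(attr, H5T_NATIVE_INT, &value);  /* convert to a native int in memory */
        H5Aclose(attr);
        return value;
    }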
      -
      -
      Name: H5Aget_space -
      Signature: -
      hid_t H5Aget_space(hid_t attr_id) -
      Purpose: -
      Gets a copy of the dataspace for an attribute. -
      Description: -
      H5Aget_space retrieves a copy of the dataspace - for an attribute. The dataspace identifier returned from - this function must be released with H5Sclose - or resource leaks will develop. -
      Parameters: -
      -
      hid_t attr_id -
      IN: Identifier of an attribute. -
      -
      Returns: -
      Returns attribute dataspace identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Aget_type -
      Signature: -
      hid_t H5Aget_type(hid_t attr_id) -
      Purpose: -
      Gets an attribute datatype. -
      Description: -
      H5Aget_type retrieves a copy of the datatype - for an attribute. -

- If the datatype is a named type, it is reopened before being returned - to the application. The datatypes returned by this function - are always read-only. If an error occurs when atomizing the - returned datatype, then the datatype is closed. -

      - The datatype identifier returned from this function must be - released with H5Tclose or resource leaks will develop. -

      Parameters: -
      -
      hid_t attr_id -
      IN: Identifier of an attribute. -
      -
      Returns: -
      Returns a datatype identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Aget_name -
      Signature: -
      size_t H5Aget_name(hid_t attr_id, - char *buf, - size_t buf_size - ) -
      Purpose: -
      Gets an attribute name. -
      Description: -
      H5Aget_name retrieves the name of an attribute - specified by the identifier, attr_id. - Up to buf_size characters are stored in - buf followed by a \0 string - terminator. If the name of the attribute is longer than - buf_size -1, the string terminator is stored in the - last position of the buffer to properly terminate the string. -
      Parameters: -
      -
      hid_t attr_id -
      IN: Identifier of the attribute. -
      char *buf -
      IN: Buffer to store name in. -
      size_t buf_size -
      IN: The size of the buffer to store the name in. -
      -
      Returns: -
      Returns the length of the attribute's name, which may be - longer than buf_size, if successful. - Otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Aget_num_attrs -
      Signature: -
      int H5Aget_num_attrs(hid_t loc_id) -
      Purpose: -
      Determines the number of attributes attached to an object. -
      Description: -
      H5Aget_num_attrs returns the number of attributes - attached to the object specified by its identifier, - loc_id. - The object can be a group, dataset, or named datatype. -
      Parameters: -
      -
      hid_t loc_id -
      IN: Identifier of a group, dataset, or named datatype. -
      -
      Returns: -
      Returns the number of attributes if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Aiterate -
      Signature: -
      int H5Aiterate(hid_t loc_id, - unsigned * idx, - H5A_operator_t op, - void *op_data - ) -
      Purpose: -
      Calls a user's function for each attribute on an object. -
      Description: -
      H5Aiterate iterates over the attributes of - the object specified by its identifier, loc_id. - The object can be a group, dataset, or named datatype. - For each attribute of the object, the op_data - and some additional information specified below are passed - to the operator function op. - The iteration begins with the attribute specified by its - index, idx; the index for the next attribute - to be processed by the operator, op, is - returned in idx. - If idx is the null pointer, then all attributes - are processed. -

      - The prototype for H5A_operator_t is:
      - typedef herr_t (*H5A_operator_t)(hid_t loc_id, - const char *attr_name, - void *operator_data); - -

      - The operation receives the identifier for the group, dataset - or named datatype being iterated over, loc_id, the - name of the current attribute about the object, attr_name, - and the pointer to the operator data passed in to H5Aiterate, - op_data. The return values from an operator are: -

        -
      • Zero causes the iterator to continue, returning zero when all - attributes have been processed. -
      • Positive causes the iterator to immediately return that positive - value, indicating short-circuit success. The iterator can be - restarted at the next attribute. -
      • Negative causes the iterator to immediately return that value, - indicating failure. The iterator can be restarted at the next - attribute. -
      -
      Parameters: -
      -
      hid_t loc_id -
      IN: Identifier of a group, dataset or named datatype. -
      unsigned * idx -
      IN/OUT: Starting (IN) and ending (OUT) attribute index. -
      H5A_operator_t op -
      IN: User's function to pass each attribute to -
      void *op_data -
      IN/OUT: User's data to pass through to iterator operator function -
      -
      Returns: -
      If successful, returns the return value of the last operator - if it was non-zero, or zero if all attributes were processed. - Otherwise returns FAIL (-1). -
      - - -
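Example (editorial sketch): an operator that lists attribute names, matching the H5A_operator_t prototype shown above; the printf output is illustrative only.

    #include <stdio.h>
    #include "hdf5.h"

    static herr_t print_attr(hid_t loc_id, const char *attr_name, void *op_data)
    {
        (void)loc_id;
        (void)op_data;
        printf("attribute: %s\n", attr_name);
        return 0;                        /* zero: continue with the next attribute */
    }

    void list_attributes(hid_t obj_id)
    {
        unsigned idx = 0;                /* start with the first attribute */
        H5Aiterate(obj_id, &idx, print_attr, NULL);
    }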
      -
      -
      Name: H5Adelete -
      Signature: -
      herr_t H5Adelete(hid_t loc_id, - const char *name - ) -
      Purpose: -
      Deletes an attribute from a location. -
      Description: -
      H5Adelete removes the attribute specified by its - name, name, from a dataset, group, or named datatype. - This function should not be used when attribute identifiers are - open on loc_id as it may cause the internal indexes - of the attributes to change and future writes to the open - attributes to produce incorrect results. -
      Parameters: -
      -
      hid_t loc_id -
      IN: Identifier of the dataset, group, or named datatype - to have the attribute deleted from. -
      const char *name -
      IN: Name of the attribute to delete. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Aclose -
      Signature: -
      herr_t H5Aclose(hid_t attr_id) -
      Purpose: -
      Closes the specified attribute. -
      Description: -
      H5Aclose terminates access to the attribute - specified by its identifier, attr_id. - Further use of the attribute identifier will result in - undefined behavior. -
      Parameters: -
      -
      hid_t attr_id -
      IN: Attribute to release access to. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 1 September 1998 - - - diff --git a/doc/src/RM_H5D.html b/doc/src/RM_H5D.html deleted file mode 100644 index bb2c4dc..0000000 --- a/doc/src/RM_H5D.html +++ /dev/null @@ -1,431 +0,0 @@ - - -HDF5/H5D Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5D: Datasets Interface

      -
      - -

      Dataset Object API Functions

      - -These functions create and manipulate dataset objects, -and set and retrieve their constant or persistent properties. - - - -
      - -       - -       - -
      - - -
      -
      -
      Name: H5Dcreate -
      Signature: -
hid_t H5Dcreate(hid_t loc_id, - const char *name, - hid_t type_id, - hid_t space_id, - hid_t create_plist_id - ) -
      Purpose: -
      Creates a dataset at the specified location. -
      Description: -
      H5Dcreate creates a data set with a name, - name, in the file or in the group specified by - the identifier loc_id. - The dataset has the datatype and dataspace identified by - type_id and space_id, respectively. - The specified datatype and dataspace are the datatype and - dataspace of the dataset as it will exist in the file, - which may be different than in application memory. - Dataset creation properties are specified by the argument - create_plist_id. -

- create_plist_id is an H5P_DATASET_CREATE - property list created with H5Pcreate() and - initialized with the various functions described above. - H5Dcreate() returns a dataset identifier on success - or a negative value on failure. The identifier should eventually be - closed by calling H5Dclose() to release the resources - it uses. -

      Parameters: -
      -
      hid_t loc_id -
      Identifier of the file or group to create the dataset within. -
      const char * name -
      The name of the dataset to create. -
      hid_t type_id -
      Identifier of the datatype to use when creating the dataset. -
      hid_t space_id -
      Identifier of the dataspace to use when creating the dataset. -
      hid_t create_plist_id -
Identifier of the dataset creation property list. -
      -
      Returns: -
      Returns a dataset identifier if successful; - otherwise FAIL (-1). -
      - - -
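Example (editorial sketch): creating a 4x6 integer dataset with the draft five-argument H5Dcreate shown above. The dataset name "/dset" and the dimensions are placeholders; H5Screate_simple, H5Sclose, and H5T_NATIVE_INT are assumed from the H5S and H5T interfaces.

    #include "hdf5.h"

    hid_t create_dataset(hid_t file_id)
    {
        hsize_t dims[2] = {4, 6};
        hid_t   space   = H5Screate_simple(2, dims, NULL);
        hid_t   dset    = H5Dcreate(file_id, "/dset", H5T_NATIVE_INT,
                                    space, H5P_DEFAULT);

        H5Sclose(space);         /* the dataset keeps its own copy of the dataspace */
        return dset;             /* caller releases it with H5Dclose() */
    }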
      -
      -
      Name: H5Dopen -
      Signature: -
      hid_t H5Dopen(hid_t loc_id, - const char *name - ) -
      Purpose: -
      Opens an existing dataset. -
      Description: -
      H5Dopen opens an existing dataset for access in the file - or group specified in loc_id. name is - a dataset name and is used to identify the dataset in the file. -
      Parameters: -
      -
      hid_t loc_id -
      Identifier of the dataset to open or the file or group - to access the dataset within. -
      const char * name -
      The name of the dataset to access. -
      -
      Returns: -
      Returns a dataset identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Dget_space -
      Signature: -
      hid_t H5Dget_space(hid_t dataset_id - ) -
      Purpose: -
      Returns an identifier for a copy of the dataspace for a dataset. -
      Description: -
      H5Dget_space returns an identifier for a copy of the - dataspace for a dataset. - The dataspace identifier should be released with the - H5Sclose() function. -
      Parameters: -
      -
      hid_t dataset_id -
      Identifier of the dataset to query. -
      -
      Returns: -
      Returns a dataspace identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Dget_type -
      Signature: -
      hid_t H5Dget_type(hid_t dataset_id - ) -
      Purpose: -
      Returns an identifier for a copy of the datatype for a dataset. -
      Description: -
      H5Dget_type returns an identifier for a copy of the - datatype for a dataset. - The datatype should be released with the H5Tclose() function. -

      - If a dataset has a named datatype, then an identifier to the - opened datatype is returned. - Otherwise, the returned datatype is read-only. - If atomization of the datatype fails, then the datatype is closed. -

      Parameters: -
      -
      hid_t dataset_id -
      Identifier of the dataset to query. -
      -
      Returns: -
      Returns a datatype identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Dget_create_plist -
      Signature: -
      hid_t H5Dget_create_plist(hid_t dataset_id - ) -
      Purpose: -
      Returns an identifier for a copy of the - dataset creation property list for a dataset. -
      Description: -
      H5Dget_create_plist returns an identifier for a - copy of the dataset creation property list for a dataset. - The creation property list identifier should be released with - the H5Pclose() function. -
      Parameters: -
      -
      hid_t dataset_id -
      Identifier of the dataset to query. -
      -
      Returns: -
      Returns a dataset creation property list identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Dread -
      Signature: -
      herr_t H5Dread(hid_t dataset_id, - hid_t mem_type_id, - hid_t mem_space_id, - hid_t file_space_id, - hid_t xfer_plist_id, - void * buf - ) -
      Purpose: -
      Reads raw data from the specified dataset into buf, - converting from file datatype and dataspace to - memory datatype and dataspace. -
      Description: -
      H5Dread reads a (partial) dataset, specified by its - identifier dataset_id, from the file into the - application memory buffer buf. - Data transfer properties are defined by the argument - xfer_plist_id. - The memory datatype of the (partial) dataset is identified by - the identifier mem_type_id. - The part of the dataset to read is defined by - mem_space_id and file_space_id. -

      - file_space_id can be the constant H5S_ALL, - which indicates that the entire file data space is to be referenced. -

      - mem_space_id can be the constant H5S_ALL, - in which case the memory data space is the same as the file data space - defined when the dataset was created. -

      - The number of elements in the memory data space must match - the number of elements in the file data space. -

      - xfer_plist_id can be the constant H5P_DEFAULT, - in which case the default data transfer properties are used. - -

      - Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

      Parameters: -
      -
      hid_t dataset_id -
      Identifier of the dataset read from. -
      hid_t mem_type_id -
      Identifier of the memory datatype. -
      hid_t mem_space_id -
      Identifier of the memory dataspace. -
      hid_t file_space_id -
      Identifier of the dataset's dataspace in the file. -
      hid_t xfer_plist_id -
      Identifier of a transfer property list for this I/O operation. -
      void * buf -
      Buffer to store data read from the file. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
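Example (editorial sketch): reading an entire dataset into application memory with default transfer properties. The dataset name "/dset", the 4x6 shape, and H5T_NATIVE_INT are placeholders or assumptions; H5S_ALL and H5P_DEFAULT are used as described above.

    #include "hdf5.h"

    int read_dataset(hid_t file_id, int buf[4][6])
    {
        hid_t  dset = H5Dopen(file_id, "/dset");
        herr_t ret;

        /* H5S_ALL: use the whole file dataspace and a matching memory dataspace. */
        ret = H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
        H5Dclose(dset);
        return (ret < 0) ? -1 : 0;
    }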
      -
      -
      Name: H5Dwrite -
      Signature: -
      herr_t H5Dwrite(hid_t dataset_id, - hid_t mem_type_id, - hid_t mem_space_id, - hid_t file_space_id, - hid_t xfer_plist_id, - const void * buf - ) -
      Purpose: -
      Writes raw data from an application buffer buf to - the specified dataset, converting from - memory datatype and dataspace to file datatype and dataspace. -
      Description: -
      H5Dwrite writes a (partial) dataset, specified by its - identifier dataset_id, from the - application memory buffer buf into the file. - Data transfer properties are defined by the argument - xfer_plist_id. - The memory datatype of the (partial) dataset is identified by - the identifier mem_type_id. - The part of the dataset to write is defined by - mem_space_id and file_space_id. -

- file_space_id can be the constant H5S_ALL, - which indicates that the entire file data space is to be referenced. -

      - mem_space_id can be the constant H5S_ALL, - in which case the memory data space is the same as the file data space - defined when the dataset was created. -

      - The number of elements in the memory data space must match - the number of elements in the file data space. -

- xfer_plist_id can be the constant H5P_DEFAULT, - in which case the default data transfer properties are used. -

      - Writing to an external dataset will fail if the HDF5 file is - not open for writing. -

      - Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

      Parameters: -
      -
      hid_t dataset_id -
Identifier of the dataset to write to. -
      hid_t mem_type_id -
      Identifier of the memory datatype. -
      hid_t mem_space_id -
      Identifier of the memory dataspace. -
      hid_t file_space_id -
      Identifier of the dataset's dataspace in the file. -
      hid_t xfer_plist_id -
      Identifier of a transfer property list for this I/O operation. -
      const void * buf -
      Buffer with data to be written to the file. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
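Example (editorial sketch): a whole-dataset write with default transfer properties; the 4x6 shape and the fill pattern are placeholders, and H5T_NATIVE_INT is assumed from the H5T interface.

    #include "hdf5.h"

    int write_dataset(hid_t dset_id)
    {
        int data[4][6];
        int i, j;

        for (i = 0; i < 4; i++)
            for (j = 0; j < 6; j++)
                data[i][j] = i * 6 + j;        /* illustrative values */

        return (H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
                         H5P_DEFAULT, data) < 0) ? -1 : 0;
    }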
      -
      -
      Name: H5Dextend -
      Signature: -
      herr_t H5Dextend(hid_t dataset_id, - const hsize_t * size - ) -
      Purpose: -
      Extends a dataset with unlimited dimension. -
      Description: -
      H5Dextend verifies that the dataset is at least of size - size. - The dimensionality of size is the same as that of - the dataspace of the dataset being changed. - This function cannot be applied to a dataset with fixed dimensions. -
      Parameters: -
      -
      hid_t dataset_id -
      Identifier of the dataset. -
      const hsize_t * size -
      Array containing the new magnitude of each dimension. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Dclose -
      Signature: -
herr_t H5Dclose(hid_t dataset_id - ) -
      Purpose: -
Ends access to a dataset and releases resources used by it. -
      Description: -
      H5Dclose ends access to a dataset specified by - dataset_id and releases resources used by it. - Further use of the dataset identifier is illegal in calls to - the dataset API. -
      Parameters: -
      -
      hid_t dataset_id -
      Identifier of the dataset to finish access to. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 12 August 1998 - - - diff --git a/doc/src/RM_H5E.html b/doc/src/RM_H5E.html deleted file mode 100644 index d29380e..0000000 --- a/doc/src/RM_H5E.html +++ /dev/null @@ -1,367 +0,0 @@ - - -HDF5/H5E Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5E: Error Interface

      -
      - -

      Error API Functions

      - -These functions provide error handling capabilities in the HDF5 environment. - - - -
      - -       - -       - -
      - -

      -The Error interface provides error handling in the form of a stack. -The FUNC_ENTER() macro clears the error stack whenever -an interface function is entered. -When an error is detected, an entry is pushed onto the stack. -As the functions unwind, additional entries are pushed onto the stack. -The API function will return some indication that an error occurred and -the application can print the error stack. -

      -Certain API functions in the H5E package, such as H5Eprint(), -do not clear the error stack. Otherwise, any function which -does not have an underscore immediately after the package name -will clear the error stack. For instance, H5Fopen() -clears the error stack while H5F_open() does not. -

      -An error stack has a fixed maximum size. -If this size is exceeded then the stack will be truncated and only the -inner-most functions will have entries on the stack. -This is expected to be a rare condition. -

      -Each thread has its own error stack, but since -multi-threading has not been added to the library yet, this -package maintains a single error stack. The error stack is -statically allocated to reduce the complexity of handling -errors within the H5E package. - - -


      -
      -
      Name: H5Eset_auto -
      Signature: -
      herr_t H5Eset_auto(H5E_auto_t func, - void *client_data - ) -
      Purpose: -
      Turns automatic error printing on or off. -
      Description: -
      H5Eset_auto turns on or off automatic printing of - errors. When turned on (non-null func pointer), - any API function which returns an error indication will - first call func, passing it client_data - as an argument. -

      - When the library is first initialized the auto printing function - is set to H5Eprint() (cast appropriately) and - client_data is the standard error stream pointer, - stderr. -

      - Automatic stack traversal is always in the - H5E_WALK_DOWNWARD direction. -

      Parameters: -
      -
      H5E_auto_t func -
      Function to be called upon an error condition. -
      void *client_data -
      Data passed to the error function. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
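Example (editorial sketch): the usual save/disable/restore pattern around a call that is expected to fail, using H5Eget_auto (documented next) to remember the current handler. The probe with H5Fis_hdf5 is illustrative only.

    #include "hdf5.h"

    void probe_quietly(const char *name)
    {
        H5E_auto_t old_func;
        void      *old_data;

        H5Eget_auto(&old_func, &old_data);   /* remember the current settings */
        H5Eset_auto(NULL, NULL);             /* turn off automatic error printing */

        (void)H5Fis_hdf5(name);              /* may legitimately fail; stay silent */

        H5Eset_auto(old_func, old_data);     /* restore the previous handler */
    }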
      -
      -
      Name: H5Eget_auto -
      Signature: -
      herr_t H5Eget_auto(H5E_auto_t * func, - void **client_data - ) -
      Purpose: -
      Returns the current settings for the automatic error stack - traversal function and its data. -
      Description: -
      H5Eget_auto returns the current settings for the - automatic error stack traversal function, func, - and its data, client_data. Either (or both) - arguments may be null in which case the value is not returned. -
      Parameters: -
      -
      H5E_auto_t * func -
      Current setting for the function to be called upon an - error condition. -
      void **client_data -
      Current setting for the data passed to the error function. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Eclear -
      Signature: -
      herr_t H5Eclear(void) -
      Purpose: -
      Clears the error stack for the current thread. -
      Description: -
      H5Eclear clears the error stack for the current thread. -

      - The stack is also cleared whenever an API function is called, - with certain exceptions (for instance, H5Eprint()). -

      - H5Eclear can fail if there are problems initializing - the library. -

      Parameters: -
      -
      None -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Eprint -
      Signature: -
      herr_t H5Eprint(FILE * stream) -
      Purpose: -
      Prints the error stack in a default manner. -
      Description: -
      H5Eprint prints the error stack on the specified - stream, stream. - Even if the error stack is empty, a one-line message will be printed: -
           - HDF5-DIAG: Error detected in thread 0. -

- H5Eprint is a convenience function for - H5Ewalk() with a function that prints error messages. - Users are encouraged to write their own more specific error handlers. -

      Parameters: -
      -
      FILE * stream -
      File pointer, or stderr if NULL. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Ewalk -
      Signature: -
      herr_t H5Ewalk(H5E_direction_t direction, - H5E_walk_t func, - void * client_data - ) -
      Purpose: -
      Walks the error stack for the current thread, calling a specified - function. -
      Description: -
      H5Ewalk walks the error stack for the current thread - and calls the specified function for each error along the way. -

      - direction determines whether the stack is walked - from the inside out or the outside in. - A value of H5E_WALK_UPWARD means begin with the - most specific error and end at the API; - a value of H5E_WALK_DOWNWARD means to start at the - API and end at the inner-most function where the error was first - detected. -

- func will be called for each error in the error stack. - Its arguments will include an index number (beginning at zero - regardless of stack traversal direction), an error stack entry, - and the client_data pointer passed to - H5Ewalk. -

      - H5Ewalk can fail if there are problems initializing - the library. -

      Parameters: -
      -
      H5E_direction_t direction -
      Direction in which the error stack is to be walked. -
      H5E_walk_t func -
      Function to be called for each error encountered. -
      void * client_data -
      Data to be passed with func. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
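Example (editorial sketch): a minimal walker that only counts stack entries. The callback's prototype is inferred from the H5Ewalk_cb parameters documented next; the printf output is illustrative.

    #include <stdio.h>
    #include "hdf5.h"

    static herr_t count_errors(int n, H5E_error_t *err_desc, void *client_data)
    {
        (void)err_desc;
        *(int *)client_data = n + 1;         /* n is zero-based */
        return 0;                            /* keep walking */
    }

    void report_stack_depth(void)
    {
        int depth = 0;

        H5Ewalk(H5E_WALK_DOWNWARD, count_errors, &depth);
        printf("error stack holds %d entries\n", depth);
    }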
      -
      -
      Name: H5Ewalk_cb -
      Signature: -
      herr_t H5Ewalk_cb(int n, - H5E_error_t *err_desc, - void *client_data - ) -
      Purpose: -
      Default error stack traversal callback function - that prints error messages to the specified output stream. -
      Description: -
H5Ewalk_cb is a default error stack traversal callback - function that prints error messages to the specified output stream. - It is not meant to be called directly but rather to be passed as an - argument to the H5Ewalk() function. - This function is also called by H5Eprint(). - Application writers are encouraged to use this function as a - model for their own error stack walking functions. -

      - n is a counter for how many times this function - has been called for this particular traversal of the stack. - It always begins at zero for the first error on the stack - (either the top or bottom error, or even both, depending on - the traversal direction and the size of the stack). -

      - err_desc is an error description. It contains all the - information about a particular error. -

      - client_data is the same pointer that was passed as the - client_data argument of H5Ewalk(). - It is expected to be a file pointer (or stderr if null). -

      Parameters: -
      -
      int n -
      Number of times this function has been called - for this traversal of the stack. -
      H5E_error_t *err_desc -
      Error description. -
      void *client_data -
      A file pointer, or stderr if null. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Eget_major -
      Signature: -
      const char * H5Eget_major(H5E_major_t n) -
      Purpose: -
      Returns a character string describing an error specified by a - major error number. -
      Description: -
      Given a major error number, H5Eget_major returns a - constant character string that describes the error. -
      Parameters: -
      -
      H5E_major_t n -
      Major error number. -
      -
      Returns: -
      Returns a character string describing the error if successful. - Otherwise returns "Invalid major error number." -
      - - -
      -
      -
      Name: H5Eget_minor -
      Signature: -
      const char * H5Eget_minor(H5E_minor_t n) -
      Purpose: -
      Returns a character string describing an error specified by a - minor error number. -
      Description: -
      Given a minor error number, H5Eget_minor returns a - constant character string that describes the error. -
      Parameters: -
      -
      H5E_minor_t n -
      Minor error number. -
      -
      Returns: -
      Returns a character string describing the error if successful. - Otherwise returns "Invalid minor error number." -
      - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 14 July 1998 - - - diff --git a/doc/src/RM_H5F.html b/doc/src/RM_H5F.html deleted file mode 100644 index 5ecc148..0000000 --- a/doc/src/RM_H5F.html +++ /dev/null @@ -1,334 +0,0 @@ - - -HDF5/H5F Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5F: File Interface

      -
      - -

      File API Functions

      - -These functions are designed to provide file-level access to HDF5 files. -Further manipulation of objects inside a file is performed through one of APIs -documented below. - - - -
      - -       - -       - -
      - - -
      -
      -
      Name: H5Fopen -
      Signature: -
      hid_t H5Fopen(const char *name, - unsigned flags, - hid_t access_id - ) -
      Purpose: -
      Opens an existing file. -
      Description: -
      H5Fopen opens an existing file and is the primary - function for accessing existing HDF5 files. -

      - The parameter access_id is a file access property - list identifier or H5P_DEFAULT for the default I/O access - parameters. -

      - The flags argument determines whether writing - to an existing file will be allowed or not. - The file is opened with read and write permission if - flags is set to H5F_ACC_RDWR. - All flags may be combined with the bit-wise OR operator (`|') - to change the behavior of the file open call. - The more complex behaviors of a file's access are controlled - through the file-access property list. -

- A file that is opened more than once returns a unique identifier - for each H5Fopen() call, and the file can be accessed - through any of those identifiers. -

      - The return value is a file identifier for the open file and it - should be closed by calling H5Fclose() when it is - no longer needed. -

      Parameters: -
      -
      const char *name -
      Name of the file to access. -
      unsigned flags -
      File access flags. See the H5Fcreate - parameters list for a list of possible values. -
      hid_t access_id -
      Identifier for the file access properties list. -
      -
      Returns: -
      Returns a file identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Fcreate -
      Signature: -
      hid_t H5Fcreate(const char *name, - unsigned flags, - hid_t create_id, - hid_t access_id - ) -
      Purpose: -
      Creates HDF5 files. -
      Description: -
H5Fcreate is the primary function for creating - HDF5 files. -

      - The flags parameter determines whether an - existing file will be overwritten. All newly created files - are opened for both reading and writing. All flags may be - combined with the bit-wise OR operator (`|') to change - the behavior of the H5Fcreate call. -

      - The more complex behaviors of file creation and access - are controlled through the file-creation and file-access - property lists. The value of H5P_DEFAULT for - a property list value indicates that the library should use - the default values for the appropriate property list. Also see - H5Fpublic.h for the list of supported flags. -

      Parameters: -
      -
      const char *name -
      Name of the file to access. -
unsigned flags -
      File access flags. Possible values include: -
        -
        H5F_ACC_RDWR -
        Allow read and write access to file. -
        H5F_ACC_RDONLY -
        Allow read-only access to file. -
        H5F_ACC_TRUNC -
        Truncate file, if it already exists, - erasing all data previously stored in the file. -
        H5F_ACC_EXCL -
        Fail if file already exists. -
        H5F_ACC_DEBUG -
        Print debug information. -
        H5P_DEFAULT -
        Apply default file access and creation properties. -
      -
      hid_t create_id -
      File creation property list identifier, used when modifying - default file meta-data. -
      hid_t access_id -
      File access property list identifier. - If parallel file access is desired, this is a collective - call according to the communicator stored in the - access_id. - Use 0 for default access properties. -
      -
      Returns: -
      Returns a file identifier if successful; - otherwise FAIL (-1). -
      - - -
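Example (editorial sketch): creating a file, closing it, and reopening it read-write; the file name "example.h5" is a placeholder and error handling is abbreviated.

    #include "hdf5.h"

    int make_file(void)
    {
        /* H5F_ACC_TRUNC: overwrite any existing file of the same name. */
        hid_t file = H5Fcreate("example.h5", H5F_ACC_TRUNC,
                               H5P_DEFAULT, H5P_DEFAULT);
        if (file < 0)
            return -1;
        H5Fclose(file);

        /* Reopen with read and write access and default access properties. */
        file = H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
        if (file < 0)
            return -1;
        H5Fclose(file);
        return 0;
    }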
      -
      -
      Name: H5Fflush -
      Signature: -
herr_t H5Fflush(hid_t object_id - ) -
      Purpose: -
      Flushes all buffers associated with a file to disk. -
      Description: -
      H5Fflush causes all buffers associated with a - file to be immediately flushed to disk without removing the - data from the cache. -

      - object_id can be any object associated with the file, - including the file itself, a dataset, a group, an attribute, or - a named data type. -

      Parameters: -
      -
hid_t object_id -
      Identifier of object used to identify the file. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Fis_hdf5 -
      Signature: -
      hbool_t H5Fis_hdf5(const char *name - ) -
      Purpose: -
      Determines whether a file is in the HDF5 format. -
      Description: -
      H5Fis_hdf5 determines whether a file is in - the HDF5 format. -
      Parameters: -
      -
      const char *name -
      File name to check format. -
      -
      Returns: -
      Returns TRUE or FALSE if successful. - Otherwise returns FAIL (-1). -
      - - -
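Example (editorial sketch): checking the format before opening read-only; H5F_ACC_RDONLY is taken from the flag list above and the error convention is abbreviated.

    #include "hdf5.h"

    hid_t open_if_hdf5(const char *name)
    {
        if (H5Fis_hdf5(name) > 0)            /* TRUE: the file is in HDF5 format */
            return H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);
        return -1;                           /* not HDF5, or the check failed */
    }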
      -
      -
      Name: H5Fget_create_plist -
      Signature: -
      hid_t H5Fget_create_plist(hid_t file_id - ) -
      Purpose: -
      Returns a file creation property list identifier. -
      Description: -
      H5Fget_create_plist returns a file creation - property list identifier identifying the creation properties - used to create this file. This function is useful for - duplicating properties when creating another file. -

      - See "File Creation Properties" in - H5P: Property List Interface - in this reference manual and - "File Creation Properties" - in Files in the - HDF5 User's Guide for - additional information and related functions. -

      Parameters: -
      -
      hid_t file_id -
      Identifier of the file to get creation property list of -
      -
      Returns: -
      Returns a file creation property list identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Fget_access_plist -
      Signature: -
      hid_t H5Fget_access_plist(hid_t file_id) -
      Purpose: -
      Returns a file access property list identifier. -
      Description: -
      H5Fget_access_plist returns the - file access property list identifier of the specified file. -

      - See "File Access Properties" in - H5P: Property List Interface - in this reference manual and - "File Access Property Lists" - in Files in the - HDF5 User's Guide for - additional information and related functions. -

      Parameters: -
      -
      hid_t file_id -
      Identifier of file to get access property list of -
      -
      Returns: -
      Returns a file access property list identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Fclose -
      Signature: -
      herr_t H5Fclose(hid_t file_id - ) -
      Purpose: -
      Terminates access to an HDF5 file. -
      Description: -
      H5Fclose terminates access to an HDF5 file. - If this is the last file identifier open for a file - and if access identifiers are still in use, - this function will fail. -
      Parameters: -
      -
      hid_t file_id -
      Identifier of a file to terminate access to. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 14 August 1998 - - - diff --git a/doc/src/RM_H5Front.html b/doc/src/RM_H5Front.html deleted file mode 100644 index c3701b8..0000000 --- a/doc/src/RM_H5Front.html +++ /dev/null @@ -1,98 +0,0 @@ - - -HDF5 Draft API Specification - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   - -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      HDF5: API Specification
      Reference Manual

      -
      - -The HDF5 libraries provide several interfaces, each of which provides the -tools required to meet specific aspects of the HDF5 data-handling requirements. - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   - -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      - - -
      -HDF Help Desk -
      -Last modified: 8 September 1998 - -
      -Copyright   -
      - - - - diff --git a/doc/src/RM_H5G.html b/doc/src/RM_H5G.html deleted file mode 100644 index a4ca46a..0000000 --- a/doc/src/RM_H5G.html +++ /dev/null @@ -1,744 +0,0 @@ - - -HDF5/H5G Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5G: Group Interface

      -
      - -

      Group Object API Functions

      - -The Group interface functions create and manipulate physical groups -of objects on disk. - - - -
      - -       - -       - -
      -(NYI = Not yet implemented) -
      - -

      -A group associates names with objects and provides a mechanism -for mapping a name to an object. Since all objects appear in at -least one group (with the possible exception of the root object) -and since objects can have names in more than one group, the set -of all objects in an HDF5 file is a directed graph. The internal -nodes (nodes with out-degree greater than zero) must be groups -while the leaf nodes (nodes with out-degree zero) are either empty -groups or objects of some other type. Exactly one object in every -non-empty file is the root object. The root object always has a -positive in-degree because it is pointed to by the file boot block. - -

      -Every file identifier returned by H5Fcreate or -H5Fopen maintains an independent current working group -stack, the top item of which is the current working group. The -stack can be manipulated with H5Gset, H5Gpush, -and H5Gpop. The root object is the current working group -if the stack is empty. - -

      -An object name consists of one or more components separated from -one another by slashes. An absolute name begins with a slash and the -object is located by looking for the first component in the root -object, then looking for the second component in the first object, etc., -until the entire name is traversed. A relative name does not begin -with a slash and the traversal begins with the current working group. - -

      -The library does not maintain the full absolute name of its current -working group because (1) cycles in the graph can make the name length -unbounded and (2) a group does not necessarily have a unique name. A -more Unix-like hierarchical naming scheme can be implemented on top of -the directed graph scheme by creating a ".." entry in each group that -points to its single predecessor; a getcwd function would -then be trivial. - -


      -
      -
      Name: H5Gcreate -
      Signature: -
      hid_t H5Gcreate(hid_t loc_id, - const char *name, - size_t size_hint - ) -
      Purpose: -
      Creates a new empty group and gives it a name. -
      Description: -
      H5Gcreate creates a new group with the specified - name at the specified location, loc_id. - The location is identified by a file or group identifier. - The name, name, must not already be taken by some - other object and all parent groups must already exist. -

      - size_hint is a hint for the number of bytes to - reserve to store the names which will be eventually added to - the new group. Passing a value of zero for size_hint - is usually adequate since the library is able to dynamically - resize the name heap, but a correct hint may result in better - performance. - If a non-positive value is supplied for size_hint, - then a default size is chosen. -

      - The return value is a group identifier for the open group. - This group identifier should be closed by calling - H5Gclose() when it is no longer needed. -

      Parameters: -
      -
      hid_t loc_id -
      The file or group identifier. -
      const char *name -
      The absolute or relative name of the new group. -
      size_t size_hint -
      An optional parameter indicating the number of bytes - to reserve for the names that will appear in the group. - A conservative estimate could result in multiple - system-level I/O requests to read the group name heap; - a liberal estimate could result in a single large - I/O request even when the group has just a few names. - HDF5 stores each name with a null terminator. -
      -
      Returns: -
      Returns a valid group identifier for the open group if successful; - otherwise FAIL (-1). -
      - - -
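Example (editorial sketch): creating a group and a subgroup using absolute and relative names; the names "/Data" and "Raw" are placeholders, and a zero size_hint lets the library size the name heap itself.

    #include "hdf5.h"

    int make_groups(hid_t file_id)
    {
        hid_t g1 = H5Gcreate(file_id, "/Data", 0);   /* absolute name */
        hid_t g2 = H5Gcreate(g1, "Raw", 0);          /* relative to /Data */

        H5Gclose(g2);
        H5Gclose(g1);
        return 0;
    }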
      -
      -
      Name: H5Gopen -
      Signature: -
      hid_t H5Gopen(hid_t loc_id, - const char *name - ) -
      Purpose: -
      Opens an existing group for modification and returns a group - identifier for that group. -
      Description: -
H5Gopen opens an existing group with the specified name at - the specified location, loc_id, and returns a group identifier - for the group. The location is identified by a file or group identifier. - The obtained group identifier should be released by calling - H5Gclose() when it is no longer needed. -
      Parameters: -
      -
      hid_t loc_id -
File or group identifier within which the group is to be opened. -
      const char * name -
      Name of group to open. -
      -
      Returns: -
      Returns a valid group identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Gset -
      Signature: -
      herr_t H5Gset (hid_t loc_id, - const char *name - ) -
      Purpose: -
      Sets the current working group within a file. -
      Description: -
      H5Gset sets the group with the specified name - to be the current working group for the file which contains it. - This function sets the current working group by modifying the - top element of the current working group stack or, if the - stack is empty, by pushing a new element onto the stack. - The initial current working group is the root group. -

      - loc_id can be a file identifier or a group identifier. -

      - name is an absolute or relative name and is resolved as follows. Each file identifier - has a current working group, initially the root group of the - file. Relative names do not begin with a slash and are relative - to the specified group or to the current working group. - Absolute names begin with a slash and are relative to the file's - root group. For instance, the name /Foo/Bar/Baz is - resolved by first looking up Foo in the root group; - the name Foo/Bar/Baz is resolved by first looking - up the name Foo in the current working group. -

      - Each file identifier maintains its own notion of the current - working group. If loc_id is a group identifier, the - file identifier is derived from the group identifier. -

      - If a single file is opened with multiple calls to H5Fopen(), - which would return multiple file identifiers, then each - identifier's current working group can be set independently - of the other file identifiers for that file. -

      Parameters: -
      -
      hid_t loc_id -
      The file or group identifier. -
      const char *name -
      The name of the new current working group. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Gpush -
      Signature: -
      herr_t H5Gpush (hid_t loc_id, - const char *name - ) -
      Purpose: -
      Sets the current working group by pushing a - new element onto the current working group stack. -
      Description: -
      Each file identifier maintains a stack of groups, the top group - of which is the current working group. The stack initially - contains only the root group. H5Gpush pushes a new group - onto the stack, thus setting a new current working group. -
      Parameters: -
      -
      hid_t loc_id -
      File or group identifier. -
      const char *name -
      The name of the new current working group. The name may be - an absolute or relative name. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Gpop -
      Signature: -
      herr_t H5Gpop (hid_t loc_id) -
      Purpose: -
      Removes the top, or latest, entry from the working group stack, - setting the current working group to the previous value. -
      Description: -
      H5Gpop restores the previous current working group by - popping an element from the current working group stack. - An empty stack implies that the current working group is the root - object. Attempting to pop an empty stack results in failure. -

      Each file identifier maintains its own notion of the current working group. That is, if a single file is opened with multiple calls to H5Fopen(), which returns multiple file identifiers, then each identifier's current working group can be set independently of the other file identifiers for that file.

      - If loc_id is a group identifier, it is used only to determine the - file identifier for the stack from which to pop the top entry. -

      Parameters: -
      -
      hid_t loc_id -
      The file, group, dataset, or datatype identifier. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
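      Example:
      A minimal sketch, using H5Gpush and H5Gpop as described above, that
      temporarily changes the current working group; the group name
      "/Data/Raw" is illustrative only.

          #include <hdf5.h>

          void
          visit_subgroup(hid_t file)
          {
              /* Make /Data/Raw the current working group so that relative
               * names such as "temperature" resolve inside it. */
              H5Gpush(file, "/Data/Raw");

              /* ... operate on objects using names relative to /Data/Raw ... */

              /* Restore the previous current working group. */
              H5Gpop(file);
          }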
      - - -
      -
      -
      Name: H5Gclose -
      Signature: -
      herr_t H5Gclose(hid_t group_id) -
      Purpose: -
      Closes the specified group. -
      Description: -
      H5Gclose releases resources used by a group which was - opened by H5Gcreate() or H5Gopen(). - After closing a group, the group_id cannot be used again. -

      - Failure to release a group with this call will result in resource leaks. -

      Parameters: -
      -
      hid_t group_id -
      Group identifier to release. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Glink -
      Signature: -
      herr_t H5Glink(hid_t loc_id, - H5G_link_t link_type, - const char *current_name, - const char *new_name - ) -
      Purpose: -
      Creates a link of the specified type from new_name - to current_name. -
      Description: -
      H5Glink creates a new name for an object that has some current - name, possibly one of many names it currently has. -

      - If link_type is H5G_LINK_HARD, then - current_name must name an existing object and both - names are interpreted relative to loc_id, which is - either a file identifier or a group identifier. -

      - If link_type is H5G_LINK_SOFT, then - current_name can be anything and is interpreted at - lookup time relative to the group which contains the final - component of new_name. For instance, if - current_name is ./foo, - new_name is ./x/y/bar, and a request - is made for ./x/y/bar, then the actual object looked - up is ./x/y/./foo. -

      Parameters: -
      -
      hid_t loc_id -
      File, group, dataset, or datatype identifier. -
      H5G_link_t link_type -
      Link type. - Possible values are H5G_LINK_HARD and H5G_LINK_SOFT. -
      const char * current_name -
      Name of the existing object if link is a hard link. - Can be anything for the soft link. -
      const char * new_name -
      New name for the object. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
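      Example:
      A minimal sketch creating one hard link and one soft link with the
      signature above; the object names are illustrative only.

          #include <hdf5.h>

          void
          make_links(hid_t file)
          {
              /* Hard link: both names now refer to the same object header. */
              H5Glink(file, H5G_LINK_HARD, "/Data/A", "/Data/A_copy");

              /* Soft link: the value "/Data/A" is resolved at lookup time. */
              H5Glink(file, H5G_LINK_SOFT, "/Data/A", "/Aliases/A");
          }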
      - - -
      -
      -
      Name: H5Gunlink -      - (Not implemented in this release.) -
      Signature: -
      herr_t H5Gunlink(hid_t loc_id, - const char *name - ) -
      Purpose: -
      Removes the specified name from the group graph and decrements the link count for the object to which name points.
      Description: -
      H5Gunlink removes an association between a name and an object. - Object headers keep track of how many hard links refer to the object; - when the hard link count reaches zero, the object can be removed - from the file. Objects which are open are not removed until all - identifiers to the object are closed. -

      - If the link count reaches zero, all file-space associated with - the object will be reclaimed. If the object is open, the - reclamation of the file space is delayed until all handles to the - object are closed. -

      Parameters: -
      -
      hid_t loc_id -
      Identifier of the file containing the object. -
      const char * name -
      Name of the object to unlink. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Giterate -
      Signature: -
      int H5Giterate(hid_t loc_id, - const char *name, - int *idx, - H5G_operator_t operator, - void *operator_data - ) -
      Purpose: -
      Iterates an operation over the entries of a group. -
      Description: -
      H5Giterate iterates over the members of - name in the file or group specified with - loc_id. - For each object in the group, the operator_data - and some additional information, specified below, are - passed to the operator function. - The iteration begins with the idx object in the - group and the next element to be processed by the operator is - returned in idx. If idx - is NULL, then the iterator starts at the first group member; - since no stopping point is returned in this case, the iterator - cannot be restarted if one of the calls to its operator returns - non-zero. -

      - The prototype for H5G_operator_t is: -

        -
        typedef herr_t (*H5G_operator_t)(hid_t group_id, const char *member_name, void *operator_data /*in,out*/);
      -
      The operation receives the group identifier for the group being - iterated over, group_id, the name of the current - object within the group, member_name, and the - pointer to the operator data passed in to H5Giterate, - operator_data. -

      - The return values from an operator are: -

        -
      • Zero causes the iterator to continue, returning - zero when all group members have been processed. -
      • Positive causes the iterator to immediately return that positive - value, indicating short-circuit success. The iterator can be - restarted at the next group member. -
      • Negative causes the iterator to immediately return that value, - indicating failure. The iterator can be restarted at the next - group member. -
      -
      Parameters: -
      -
      hid_t loc_id -
      IN: File or group identifier. -
      const char *name -
      IN: Group over which the iteration is performed. -
      int *idx -
      IN/OUT: Location at which to begin the iteration. -
      H5G_operator_t operator
      IN: Operation to be performed on an object at each step of - the iteration. -
      void *operator_data -
      IN/OUT: Data associated with the operation. -
      -
      Returns: -
      Returns the return value of the last operator if it was non-zero, - or zero if all group members were processed. - Otherwise, returns FAIL (-1). -
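      Example:
      A minimal sketch of an operator that prints every member of a group;
      the operator name and group name are illustrative, and the operator
      returns zero so that iteration continues to the end.

          #include <stdio.h>
          #include <hdf5.h>

          /* Operator: called once per group member. */
          static herr_t
          print_member(hid_t group_id, const char *member_name, void *operator_data)
          {
              int *count = (int *)operator_data;
              printf("member %d: %s\n", (*count)++, member_name);
              return 0;                     /* continue iterating */
          }

          void
          list_group(hid_t file)
          {
              int idx = 0;                  /* start at the first member */
              int count = 0;
              H5Giterate(file, "/Data", &idx, print_member, &count);
          }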
      - - -
      -
      -
      Name: H5Gmove -      - (Not implemented in this release.) -
      Signature: -
      herr_t H5Gmove(hid_t loc_id, - const char *src, - const char *dst - ) -
      Purpose: -
      Renames an object within an HDF5 file. -
      Description: -
      H5Gmove renames an object within an HDF5 file. - The original name, src, is unlinked from the - group graph and the new name, dst, is inserted - as an atomic operation. Both names are interpreted relative - to loc_id, which is either a file or a group - identifier. -
      Parameters: -
      -
      hid_t loc_id -
      File or group identifier. -
      const char *src -
      Object's original name. -
      const char *dst -
      Object's new name. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Gget_objinfo -
      Signature: -
      herr_t H5Gget_objinfo(hid_t loc_id, - const char *name, - hbool_t follow_link, - H5G_stat_t *statbuf - ) -
      Purpose: -
      Returns information about an object. -
      Description: -
      H5Gget_objinfo returns information about the - specified object through the statbuf argument. - loc_id (a file, group, or dataset identifier) and - name together determine the object. - If the object is a symbolic link and follow_link is - zero (0), then the information returned is that for the link itself; - otherwise the link is followed and information is returned about - the object to which the link points. - If follow_link is non-zero but the final symbolic link - is dangling (does not point to anything), then an error is returned. - The statbuf fields are undefined for an error. - The existence of an object can be tested by calling this function - with a null statbuf. -

      - H5Gget_objinfo() fills in the following data structure: -

      -                  typedef struct H5G_stat_t {
      -                      unsigned long fileno;
      -                      haddr_t objno;
      -                      unsigned nlink;
      -                      H5G_obj_t type;
      -                      time_t mtime; 
      -                      size_t linklen;
      -                  } H5G_stat_t
      -        
      The fileno and objno fields contain values which uniquely identify an object among those HDF5 files which are open: if both values are the same between two objects, then the two objects are the same (provided both files are still open). The nlink field is the number of hard links to the object, or zero when information is being returned about a symbolic link (symbolic links do not have hard links, but all other objects always have at least one). The type field contains the type of the object, one of H5G_GROUP, H5G_DATASET, or H5G_LINK. The mtime field contains the modification time. If information is being returned about a symbolic link, then linklen will be the length of the link value (the name of the pointed-to object with the null terminator); otherwise linklen will be zero. Other fields may be added to this structure in the future.
      Note: -
      Some systems will be able to record the time accurately but - unable to retrieve the correct time; such systems (e.g., Irix64) - will report an mtime value of 0 (zero). -
      Parameters: -
      -
      hid_t loc_id -
      IN: File, group, dataset, or datatype identifier. -
      const char *name -
      IN: Name of the object for which status is being sought. -
      hbool_t follow_link -
      IN: Link flag. -
      H5G_stat_t *statbuf -
      OUT: Buffer in which to return information about the object. -
      -
      Returns: -
      Returns SUCCEED (0) with the fields of statbuf - (if non-null) initialized. - Otherwise returns FAIL (-1). -
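      Example:
      A minimal sketch that reports the type of an object using the
      H5G_stat_t fields described above; the printed messages are
      illustrative only.

          #include <stdio.h>
          #include <hdf5.h>

          void
          describe(hid_t file, const char *name)
          {
              H5G_stat_t sb;

              /* follow_link == 0: if name is a symbolic link, describe the link itself. */
              if (H5Gget_objinfo(file, name, 0, &sb) < 0)
                  return;

              if (sb.type == H5G_GROUP)
                  printf("%s is a group with %u hard link(s)\n", name, sb.nlink);
              else if (sb.type == H5G_DATASET)
                  printf("%s is a dataset\n", name);
              else if (sb.type == H5G_LINK)
                  printf("%s is a symbolic link, value length %lu\n",
                         name, (unsigned long)sb.linklen);
          }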
      - - -
      -
      -
      Name: H5Gget_linkval -
      Signature: -
      herr_t H5Gget_linkval(hid_t loc_id, - const char *name, - size_t size, - char *value - ) -
      Purpose: -
      Returns link value. -
      Description: -
      H5Gget_linkval returns size - characters of the link value through the value - argument if loc_id (a file or group identifier) - and name specify a symbolic link. - If size is smaller than the link value, then - value will not be null terminated. -

      - This function fails if the specified object is not a symbolic link. - The presence of a symbolic link can be tested by passing zero for - size and NULL for value. -

      - Use H5Gget_objinfo() to get the size of a link value. -

      Parameters: -
      -
      hid_t loc_id -
      IN: Identifier of the file, group, dataset, or datatype. -
      const char *name -
      IN: Name of the object whose link value is to be checked. -
      size_t size -
      IN: Maximum number of characters of value - to be returned. -
      char *value -
      OUT: Link value. -
      -
      Returns: -
      Returns SUCCEED (0), with the link value in value, - if successful. - Otherwise returns FAIL (-1). -
      - - - - - - -
      -
      -
      Name: H5Gset_comment -
      Signature: -
      herr_t H5Gset_comment(hid_t loc_id, - const char *name, - const char *comment - ) -
      Purpose: -
      Sets comment for specified object. -
      Description: -
      H5Gset_comment sets the comment for the object name to comment. Any previously existing comment is overwritten.

      - If comment is the empty string or a - null pointer, the comment message is removed from the object. -

      - Comments should be relatively short, null-terminated, - ASCII strings. -

      - Comments can be attached to any object that has an object header, - e.g., data sets, groups, named data types, and data spaces, but - not symbolic links. -

      Parameters: -
      -
      hid_t loc_id -
      IN: Identifier of the file, group, dataset, or datatype. -
      const char *name -
      IN: Name of the object whose comment is to be set or reset. -
      const char *comment -
      IN: The new comment. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Gget_comment -
      Signature: -
      herr_t H5Gget_comment(hid_t loc_id, - const char *name, - size_t bufsize, - char *comment - ) -
      Purpose: -
      Retrieves comment for specified object. -
      Description: -
      H5Gget_comment retrieves the comment for the object name. The comment is returned in the buffer comment.

      - At most bufsize characters, including a null - terminator, are copied. The result is not null terminated - if the comment is longer than the supplied buffer. -

      - If an object does not have a comment, the empty string - is returned. -

      Parameters: -
      -
      hid_t loc_id -
      IN: Identifier of the file, group, dataset, or datatype. -
      const char *name -
      IN: Name of the object whose comment is to be set or reset. -
      size_t bufsize -
      IN: Anticipated size of the buffer required to hold - comment. -
      char *comment -
      OUT: The comment. -
      -
      Returns: -
      Returns the number of characters in the comment, - counting the null terminator, if successful; the value - returned may be larger than bufsize. - Otherwise returns FAIL (-1). -
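      Example:
      A minimal sketch that attaches a comment to an object and reads it
      back with the signatures above; the object name and comment text are
      illustrative only.

          #include <stdio.h>
          #include <hdf5.h>

          void
          comment_example(hid_t file)
          {
              char buf[64];

              /* Attach a short, null-terminated ASCII comment. */
              H5Gset_comment(file, "/Data/A", "calibrated 2 September 1998");

              /* Read it back; an empty string means the object has no comment. */
              if (H5Gget_comment(file, "/Data/A", sizeof(buf), buf) >= 0)
                  printf("comment: %s\n", buf);
          }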
      - - - - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 2 September 1998 - - - diff --git a/doc/src/RM_H5P.html b/doc/src/RM_H5P.html deleted file mode 100644 index d9f1191..0000000 --- a/doc/src/RM_H5P.html +++ /dev/null @@ -1,1996 +0,0 @@ - - -HDF5/H5P Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5P: Property List Interface

      -
      -

      Property List API Functions

      - -These functions manipulate property list objects to allow objects which require -many different parameters to be easily manipulated. - - - - - - - -
      - - General Property List Operations - - -

      File Creation Properties -

      - -
             - - File Access Properties - - -       - - Dataset Creation Properties - - -

      Dataset Memory and Transfer Properties -

      - -
      -
      -||   Available only in the parallel HDF5 library. -
      -
      -
      -
      Name: H5Pcreate -
      Signature: -
      hid_t H5Pcreate(H5P_class_t type - ) -
      Purpose: -
      Creates a new property list as an instance of a property list class.
      Description: -
      H5Pcreate creates a new property list as an instance of some property list class. The new property list is initialized with default values for the specified class. The classes are:
      -
      H5P_FILE_CREATE -
      Properties for file creation. - See Files - in the HDF User's Guide - for details about the file creation properties. -
      H5P_FILE_ACCESS -
      Properties for file access. See Files in the HDF User's Guide for details about the file access properties.
      H5P_DATASET_CREATE -
      Properties for dataset creation. - See Datasets - in the HDF User's Guide - for details about dataset creation properties. -
      H5P_DATASET_XFER -
      Properties for raw data transfer. - See Datasets - in the HDF User's Guide - for details about raw data transfer properties. -
      -
      Parameters: -
      -
      H5P_class_t type -
      IN: The type of property list to create. -
      -
      Returns: -
      Returns a property list identifier (plist) if successful; otherwise FAIL (-1).
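      Example:
      A minimal sketch creating and releasing a dataset creation property
      list; the setter calls named in the comment are described later in
      this section.

          #include <hdf5.h>

          void
          plist_example(void)
          {
              /* A dataset creation property list, initialized with defaults. */
              hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);

              /* ... modify it with H5Pset_layout(), H5Pset_chunk(), etc. ... */

              /* Property lists should be closed when no longer needed. */
              H5Pclose(dcpl);
          }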
      - -
      -
      -
      Name: H5Pclose -
      Signature: -
      herr_t H5Pclose(hid_t plist - ) -
      Purpose: -
      Terminates access to a property list. -
      Description: -
      H5Pclose terminates access to a property list. - All property lists should be closed when the application is - finished accessing them. - This frees resources used by the property list. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the property list to terminate access to. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_class -
      Signature: -
      H5P_class_t H5Pget_class(hid_t plist - ) -
      Purpose: -
      Returns the property list class for a property list. -
      Description: -
      H5Pget_class returns the property list class for the property list identified by the plist parameter. Valid property list classes are defined in the description of H5Pcreate().
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to query. -
      -
      Returns: -
      Returns a property list class if successful. - Otherwise returns H5P_NO_CLASS (-1). -
      - - -
      -
      -
      Name: H5Pcopy -
      Signature: -
      hid_t H5Pcopy(hid_t plist - ) -
      Purpose: -
      Copies an existing property list to create a new property list. -
      Description: -
      H5Pcopy copies an existing property list to create - a new property list. - The new property list has the same properties and values - as the original property list. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to duplicate. -
      -
      Returns: -
      Returns a property list identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_version -
      Signature: -
      herr_t H5Pget_version(hid_t plist, - int * boot, - int * freelist, - int * stab, - int * shhdr - ) -
      Purpose: -
      Retrieves the version information of various objects for - a file creation property list. -
      Description: -
      H5Pget_version retrieves the version information of various objects - for a file creation property list. Any pointer parameters which are - passed as NULL are not queried. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file creation property list. -
      int * boot -
      OUT: Pointer to location to return boot block version number. -
      int * freelist -
      OUT: Pointer to location to return global freelist version number. -
      int * stab -
      OUT: Pointer to location to return symbol table version number. -
      int * shhdr -
      OUT: Pointer to location to return shared object header version number. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_userblock -
      Signature: -
      herr_t H5Pset_userblock(hid_t plist, - hsize_t size - ) -
      Purpose: -
      Sets user block size. -
      Description: -
      H5Pset_userblock sets the user block size of a - file creation property list. - The default user block size is 0; it may be set to any - power of 2 equal to 512 or greater (512, 1024, 2048, etc.). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to modify. -
      hsize_t size -
      IN: Size of the user-block in bytes. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_userblock -
      Signature: -
      herr_t H5Pget_userblock(hid_t plist, - hsize_t * size - ) -
      Purpose: -
      Retrieves the size of a user block. -
      Description: -
      H5Pget_userblock retrieves the size of a user block - in a file creation property list. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier for property list to query. -
      hsize_t * size -
      OUT: Pointer to location to return user-block size. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_sizes -
      Signature: -
      herr_t H5Pset_sizes(hid_t plist, - size_t sizeof_addr, - size_t sizeof_size - ) -
      Purpose: -
      Sets the byte size of the offsets and lengths used to address objects - in an HDF5 file. -
      Description: -
      H5Pset_sizes sets the byte size of the offsets and lengths used to address objects in an HDF5 file. This function is only valid for file creation property lists. Passing in a value of 0 for one of the sizeof parameters retains the current value. The default value for both values is 4 bytes. Valid values currently are 2, 4, 8, and 16.
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to modify. -
      size_t sizeof_addr -
      IN: Size of an object offset in bytes. -
      size_t sizeof_size -
      IN: Size of an object length in bytes. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
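      Example:
      A minimal sketch building a file creation property list with a
      512-byte user block and 8-byte offsets and lengths; H5Fcreate(),
      H5Fclose(), and H5F_ACC_TRUNC are assumed from the H5F interface.

          #include <hdf5.h>

          void
          fcpl_example(void)
          {
              hid_t fcpl, file;

              fcpl = H5Pcreate(H5P_FILE_CREATE);

              /* Reserve a 512-byte user block at the start of the file. */
              H5Pset_userblock(fcpl, 512);

              /* Use 8-byte offsets and 8-byte lengths for objects in the file. */
              H5Pset_sizes(fcpl, 8, 8);

              /* Assumed: H5Fcreate() takes the list as its creation properties. */
              file = H5Fcreate("big.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);

              H5Pclose(fcpl);
              H5Fclose(file);
          }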
      - - -
      -
      -
      Name: H5Pget_sizes -
      Signature: -
      herr_t H5Pget_sizes(hid_t plist, - size_t * sizeof_addr, - size_t * sizeof_size - ) -
      Purpose: -
      Retrieves the size of the offsets and lengths used in an HDF5 file. -
      Description: -
      H5Pget_sizes retrieves the size of the offsets - and lengths used in an HDF5 file. - This function is only valid for file creation property lists. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to query. -
      size_t * sizeof_addr
      OUT: Pointer to location to return offset size in bytes. -
      size_t * sizeof_size
      OUT: Pointer to location to return length size in bytes. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_mpi -
      Signature: -
      herr_t H5Pset_mpi(hid_t plist, - MPI_Comm comm, - MPI_Info info - ) -
      Purpose: -
      Retrieves the access mode for parallel I/O and the user supplied - communicator and info object. -
      Description: -
      H5Pset_mpi stores the access mode for MPI-IO calls and the user-supplied communicator and info object in the access property list, which can then be used to open a file. This function is available only in the parallel HDF5 library and is not a collective function.
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to modify -
      MPI_Comm comm -
      IN: MPI communicator to be used for file open as defined in - MPI_FILE_OPEN of MPI-2. This function does not make a - duplicated comm. Any modification to comm after - this function call returns may have undetermined effect - to the access property list. Users should call this function - again to setup the property list. -
      MPI_Info info -
      IN: MPI info object to be used for file open as defined in - MPI_FILE_OPEN of MPI-2. This function does not make a - duplicated info. Any modification to info after - this function call returns may have undetermined effect - to the access property list. Users should call this function - again to setup the property list. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
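      Example:
      A minimal sketch, for the parallel HDF5 library only, that stores an
      MPI communicator and info object in a file access property list;
      H5Fopen(), H5Fclose(), and H5F_ACC_RDWR are assumed from the H5F
      interface.

          #include <mpi.h>
          #include <hdf5.h>

          void
          open_parallel(const char *name)
          {
              hid_t fapl, file;

              fapl = H5Pcreate(H5P_FILE_ACCESS);

              /* The communicator and info object are not duplicated; do not
               * modify them while the property list is in use. */
              H5Pset_mpi(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

              file = H5Fopen(name, H5F_ACC_RDWR, fapl);

              H5Pclose(fapl);
              H5Fclose(file);
          }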
      - - -
      -
      -
      Name: H5Pget_mpi -
      Signature: -
      herr_t H5Pget_mpi(hid_t plist, - MPI_Comm *comm, - MPI_Info *info - ) -
      Purpose: -
      Retrieves the communicator and info object. -
      Description: -
      H5Pget_mpi retrieves the communicator and info object - that have been set by H5Pset_mpi. - This function is available only in the parallel HDF5 library - and is not a collective function. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list that has been set - successfully by H5Pset_mpi. -
      MPI_Comm * comm -
      OUT: Pointer to location to return the communicator. -
      MPI_Info * info -
      OUT: Pointer to location to return the info object. -
      -
      Returns: -
      Returns SUCCEED (0) if the file access property list is set to the MPI. - Otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_xfer -
      Signature: -
      herr_t H5Pset_xfer(hid_t plist, - H5D_transfer_t data_xfer_mode - ) -
      Purpose: -
      Sets the transfer mode of the dataset transfer property list. -
      Description: -
      H5Pset_xfer sets the transfer mode of the dataset transfer property list. - The list can then be used to control the I/O transfer mode - during dataset accesses. This function is available only - in the parallel HDF5 library and is not a collective function. -

      - Valid data transfer modes are: -

        -
        H5D_XFER_INDEPENDENT -
        Use independent I/O access. - (Currently the default mode.) -
        H5D_XFER_COLLECTIVE -
        Use MPI collective I/O access. -
        H5D_XFER_DFLT -
        Use default I/O access.
      -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a dataset transfer property list -
      H5D_transfer_t data_xfer_mode -
      IN: Data transfer mode. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_xfer -
      Signature: -
      herr_t H5Pget_xfer(hid_t plist, - H5D_transfer_t * data_xfer_mode - ) -
      Purpose: -
      Retrieves the transfer mode from the dataset transfer property list. -
      Description: -
      H5Pget_xfer retrieves the transfer mode from the - dataset transfer property list. - This function is available only in the parallel HDF5 library - and is not a collective function. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a dataset transfer property list. -
      H5D_transfer_t * data_xfer_mode -
      OUT: Pointer to location to return the data transfer mode. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_sym_k -
      Signature: -
      herr_t H5Pset_sym_k(hid_t plist, - int ik, - int lk - ) -
      Purpose: -
      Sets the size of parameters used to control the symbol table nodes. -
      Description: -
      H5Pset_sym_k sets the size of parameters used to - control the symbol table nodes. This function is only valid - for file creation property lists. Passing in a value of 0 for - one of the parameters retains the current value. -

      - ik is one half the rank of a tree that stores a symbol - table for a group. Internal nodes of the symbol table are on - average 75% full. That is, the average rank of the tree is - 1.5 times the value of ik. -

      - lk is one half of the number of symbols that can - be stored in a symbol table node. A symbol table node is the - leaf of a symbol table tree which is used to store a group. - When symbols are inserted randomly into a group, the group's - symbol table nodes are 75% full on average. That is, they - contain 1.5 times the number of symbols specified by - lk. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier for property list to query. -
      int ik -
      IN: Symbol table tree rank. -
      int lk -
      IN: Symbol table node size. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_sym_k -
      Signature: -
      herr_t H5Pget_sym_k(hid_t plist, - int * ik, - int * lk - ) -
      Purpose: -
      Retrieves the size of the symbol table B-tree 1/2 rank - and the symbol table leaf node 1/2 size. -
      Description: -
      H5Pget_sym_k retrieves the size of the symbol table B-tree 1/2 rank and the symbol table leaf node 1/2 size. This function is only valid for file creation property lists. If a parameter is passed as NULL, that parameter is not retrieved. See the description for H5Pset_sym_k for more information.
      Parameters: -
      -
      hid_t plist -
      IN: Property list to query. -
      int * ik -
      OUT: Pointer to location to return the symbol table's B-tree 1/2 rank. -
      int * lk
      OUT: Pointer to location to return the symbol table's leaf node 1/2 size. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_istore_k -
      Signature: -
      herr_t H5Pset_istore_k(hid_t plist, - int ik - ) -
      Purpose: -
      Sets the size of the parameter used to control the - B-trees for indexing chunked datasets. -
      Description: -
      H5Pset_istore_k sets the size of the parameter - used to control the B-trees for indexing chunked datasets. - This function is only valid for file creation property lists. - Passing in a value of 0 for one of the parameters retains - the current value. -

      - ik is one half the rank of a tree that stores - chunked raw data. On average, such a tree will be 75% full, - or have an average rank of 1.5 times the value of - ik. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to query. -
      int ik -
      IN: 1/2 rank of chunked storage B-tree. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_istore_k -
      Signature: -
      herr_t H5Pget_istore_k(hid_t plist, - int * ik - ) -
      Purpose: -
      Queries the 1/2 rank of an indexed storage B-tree. -
      Description: -
      H5Pget_istore_k queries the 1/2 rank of - an indexed storage B-tree. - The argument ik may be the null pointer (NULL). - This function is only valid for file creation property lists. -

      - See H5Pset_istore_k for details. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to query. -
      int * ik -
      OUT: Pointer to location to return the chunked storage B-tree 1/2 rank. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_layout -
      Signature: -
      herr_t H5Pset_layout(hid_t plist, - H5D_layout_t layout - ) -
      Purpose: -
      Sets the type of storage used to store the raw data for a dataset.
      Description: -
      H5Pset_layout sets the type of storage used to store the raw data for a dataset. This function is only valid for dataset creation property lists. Valid parameters for layout are:
        -
        H5D_COMPACT -
        Store raw data and object header contiguously in file. - This should only be used for very small amounts of raw - data (suggested less than 1KB). -
        H5D_CONTIGUOUS -
        Store raw data separately from the object header in one large chunk in the file.
        H5D_CHUNKED -
        Store raw data separately from the object header, with chunks of the raw data stored in separate locations in the file.
      -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to query. -
      H5D_layout_t layout -
      IN: Type of storage layout for raw data. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_layout -
      Signature: -
      H5D_layout_t H5Pget_layout(hid_t plist) -
      Purpose: -
      Returns the layout of the raw data for a dataset. -
      Description: -
      H5Pget_layout returns the layout of the raw data for - a dataset. This function is only valid for dataset creation - property lists. Valid types for layout are: -
        -
        H5D_COMPACT -
        Raw data and object header stored contiguously in file. -
        H5D_CONTIGUOUS -
        Raw data stored separately from the object header in one large chunk in the file.
        H5D_CHUNKED -
        Raw data stored separately from the object header in chunks in separate locations in the file.
      -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier for property list to query. -
      -
      Returns: -
      Returns the layout type of a dataset creation property list if successful. Otherwise returns H5D_LAYOUT_ERROR (-1).
      - - -
      -
      -
      Name: H5Pset_chunk -
      Signature: -
      herr_t H5Pset_chunk(hid_t plist, - int ndims, - const hsize_t * dim - ) -
      Purpose: -
      Sets the size of the chunks used to store a chunked layout dataset. -
      Description: -
      H5Pset_chunk sets the size of the chunks used to - store a chunked layout dataset. This function is only valid - for dataset creation property lists. - The ndims parameter currently must be the same size - as the rank of the dataset. The values of the dim - array define the size of the chunks to store the dataset's raw data. - As a side-effect, the layout of the dataset is changed to - H5D_CHUNKED, if it is not already. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier for property list to query. -
      int ndims -
      IN: The number of dimensions of each chunk. -
      const hsize_t * dim -
      IN: An array containing the size of each chunk. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
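      Example:
      A minimal sketch defining 100x100-element chunks in a dataset
      creation property list; the chunk size is illustrative only.

          #include <hdf5.h>

          void
          chunked_dcpl(void)
          {
              hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
              hsize_t chunk[2] = {100, 100};

              /* Setting a chunk size also switches the layout to H5D_CHUNKED. */
              H5Pset_chunk(dcpl, 2, chunk);

              /* ... pass dcpl to the dataset creation call ... */
              H5Pclose(dcpl);
          }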
      - - -
      -
      -
      Name: H5Pget_chunk -
      Signature: -
      int H5Pget_chunk(hid_t plist, - int max_ndims, - hsize_t * dims - ) -
      Purpose: -
      Retrieves the size of chunks for the raw data of a chunked layout dataset. - -
      Description: -
      H5Pget_chunk retrieves the size of chunks for the - raw data of a chunked layout dataset. - This function is only valid for dataset creation property lists. - At most, max_ndims elements of dims - will be initialized. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of property list to query. -
      int max_ndims -
      OUT: Size of the dims array. -
      hsize_t * dims -
      OUT: Array to store the chunk dimensions. -
      -
      Returns: -
      Returns the chunk dimensionality if successful; otherwise FAIL (-1).
      - - -
      -
      -
      Name: H5Pset_alignment -
      Signature: -
      herr_t H5Pset_alignment(hid_t plist, - hsize_t threshold, - hsize_t alignment - ) -
      Purpose: -
      Sets alignment properties of a file access property list. -
      Description: -
      H5Pset_alignment sets the alignment properties - of a file access property list - so that any file object >= THRESHOLD bytes will be aligned on - an address which is a multiple of ALIGNMENT. The addresses - are relative to the end of the user block; the alignment is - calculated by subtracting the user block size from the - absolute file address and then adjusting the address to be a - multiple of ALIGNMENT. -

      - Default values for THRESHOLD and ALIGNMENT are one, implying - no alignment. Generally the default values will result in - the best performance for single-process access to the file. - For MPI-IO and other parallel systems, choose an alignment - which is a multiple of the disk block size. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier for a file access property list. -
      hsize_t threshold -
      IN: Threshold value.
      hsize_t alignment -
      IN: Alignment value. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_alignment -
      Signature: -
      herr_t H5Pget_alignment(hid_t plist, - hsize_t *threshold, - hsize_t *alignment - ) -
      Purpose: -
      Retrieves the current settings for alignment properties from a - file access property list. -
      Description: -
      H5Pget_alignment retrieves the current settings for - alignment properties from a file access property list. - The threshold and/or alignment pointers - may be null pointers (NULL). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list. -
      hsize_t *threshold -
      OUT: Pointer to location to return the threshold value.
      hsize_t *alignment -
      OUT: Pointer to location to return the alignment value.
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_external -
      Signature: -
      herr_t H5Pset_external(hid_t plist, - const char *name, - off_t offset, - hsize_t size - ) -
      Purpose: -
      Adds an external file to the list of external files. -
      Description: -
      H5Pset_external adds an external file to the - list of external files. -

      - If a dataset is split across multiple files then the files - should be defined in order. The total size of the dataset is - the sum of the SIZE arguments for all the external files. If - the total size is larger than the size of a dataset then the - dataset can be extended (provided the data space also allows - the extending). -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a dataset creation property list. -
      const char *name -
      IN: Name of an external file. -
      off_t offset -
      IN: Offset, in bytes, from the beginning of the file - to the location in the file where the data starts. -
      hsize_t size -
      IN: Number of bytes reserved in the file for the data. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
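      Example:
      A minimal sketch splitting a dataset's raw data across two external
      files; the file names and sizes are illustrative only.

          #include <hdf5.h>

          void
          external_dcpl(void)
          {
              hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);

              /* Define the external files in order; the dataset's total size
               * is the sum of the SIZE arguments. */
              H5Pset_external(dcpl, "part1.raw", 0, 1000000);
              H5Pset_external(dcpl, "part2.raw", 0, 1000000);

              H5Pclose(dcpl);
          }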
      - - -
      -
      -
      Name: H5Pget_external_count -
      Signature: -
      int H5Pget_external_count(hid_t plist)
      Purpose: -
      Returns the number of external files for a dataset. -
      Description: -
      H5Pget_external_count returns the number of external files - for the specified dataset. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a dataset creation property list. -
      -
      Returns: -
      Returns the number of external files if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_external -
      Signature: -
      herr_t H5Pget_external(hid_t plist, - int idx, - size_t name_size, - char *name, - off_t *offset, - hsize_t *size - ) -
      Purpose: -
      Returns information about an external file. -
      Description: -
      H5Pget_external returns information about an external file. The external file is specified by its index, idx, which is a number from zero to N-1, where N is the value returned by H5Pget_external_count(). At most name_size characters are copied into the name array. If the external file name, including the null terminator, is longer than name_size, the returned name is not null terminated (similar to strncpy()).

      - If name_size is zero or name is the - null pointer, the external file name is not returned. - If offset or size are null pointers - then the corresponding information is not returned. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a dataset creation property list. -
      int idx -
      IN: External file index. -
      size_t name_size -
      IN: Maximum length of name array. -
      char *name -
      OUT: Name of the external file. -
      off_t *offset -
      OUT: Pointer to a location to return an offset value. -
      hsize_t *size -
      OUT: Pointer to a location to return the size of the - external file data. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_filter -
      Signature: -
      herr_t H5Pset_filter(hid_t plist, - H5Z_filter_t filter, - unsigned int flags, - size_t cd_nelmts, - const unsigned int cd_values[] - ) -
      Purpose: -
      Adds a filter to the filter pipeline. -
      Description: -
      H5Pset_filter adds the specified - filter and corresponding properties to the - end of an output filter pipeline. - If plist is a dataset creation property list, - the filter is added to the permanent filter pipeline; - if plist is a dataset transfer property list, - the filter is added to the transient filter pipeline. -

      - The array cd_values contains - cd_nelmts integers which are auxiliary data - for the filter. The integer values will be stored in the - dataset object header as part of the filter information. -

      - The flags argument is a bit vector with - the following fields specifying certain general properties - of the filter: -

      - - - - - - -
      H5Z_FLAG_OPTIONAL  If this bit is set then the filter is - optional. If the filter fails (see below) during an - H5Dwrite() operation then the filter is - just excluded from the pipeline for the chunk for which - it failed; the filter will not participate in the - pipeline during an H5Dread() of the chunk. - This is commonly used for compression filters: if the - compression result would be larger than the input then - the compression filter returns failure and the - uncompressed data is stored in the file. If this bit is - clear and a filter fails then H5Dwrite() - or H5Dread() also fails.
      -
      -
      Note: -
      This function currently supports only the permanent filter - pipeline; plist_id must be a dataset creation - property list. -
      Parameters: -
      -
      hid_t plist -
      IN: Property list identifier. -
      H5Z_filter_t filter -
      IN: Filter to be added to the pipeline. -
      unsigned int flags -
      IN: Bit vector specifying certain general properties - of the filter. -
      size_t cd_nelmts -
      IN: Number of elements in cd_values -
      const unsigned int cd_values[] -
      IN: Auxiliary data for the filter. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
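      Example:
      A minimal sketch adding an optional compression filter to the
      permanent filter pipeline; H5Z_FILTER_DEFLATE is assumed here to be
      a registered filter identifier, and the compression level is
      illustrative only.

          #include <hdf5.h>

          void
          filtered_dcpl(void)
          {
              hid_t        dcpl  = H5Pcreate(H5P_DATASET_CREATE);
              unsigned int level = 6;     /* auxiliary data stored with the filter */

              /* Optional filter: chunks that do not shrink are stored
               * uncompressed instead of causing H5Dwrite() to fail. */
              H5Pset_filter(dcpl, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, &level);

              H5Pclose(dcpl);
          }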
      - - -
      -
      -
      Name: H5Pget_nfilters -
      Signature: -
      int H5Pget_nfilters(hid_t plist) -
      Purpose: -
      Returns the number of filters in the pipeline. -
      Description: -
      H5Pget_nfilters returns the number of filters - defined in the filter pipeline associated with the property list - plist. -

      - In each pipeline, the filters are numbered from - 0 through N-1, where N is the value returned - by this function. During output to the file, the filters are - applied in increasing order; during input from the file, they - are applied in decreasing order. -

      - H5Pget_nfilters returns the number of filters - in the pipeline, including zero (0) if there - are none. -

      Note: -
      This function currently supports only the permanent filter - pipeline; plist_id must be a dataset creation - property list. -
      Parameters: -
      -
      hid_t plist -
      IN: Property list identifier. -
      -
      Returns: -
      Returns the number of filters in the pipeline if successful; - otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_filter -
      Signature: -
      H5Z_filter_t H5Pget_filter(hid_t plist, - int filter_number, - unsigned int *flags, - size_t *cd_nelmts, - unsigned int *cd_values, - size_t namelen, - char name[] - ) -
      Purpose: -
      Returns information about a filter in a pipeline. -
      Description: -
      - H5Pget_filter returns information about a - filter, specified by its filter number, in a filter pipeline, - specified by the property list with which it is associated. -

      - If plist is a dataset creation property list, - the pipeline is a permanent filter pipeline; - if plist is a dataset transfer property list, - the pipeline is a transient filter pipeline. -

      On input, cd_nelmts indicates the number of entries in the cd_values array, as allocated by the caller; on return, cd_nelmts contains the number of values defined by the filter.

      - filter_number is a value between zero and - N-1, as described in - H5Pget_nfilters(). - The function will return FAIL (-1) if the filter number is out - of range. -

      - If name is a pointer to an array of at least - namelen bytes, the filter name will be copied - into that array. The name will be null terminated if - namelen is large enough. The filter name returned - will be the name appearing in the file, the name registered - for the filter, or an empty string. -

      - The structure of the flags argument is discussed - in H5Pset_filter(). -

      Note: -
      This function currently supports only the permanent filter - pipeline; plist must be a dataset creation property - list. -
      Parameters: -
      -
      hid_t plist -
      IN: Property list identifier. -
      int filter_number -
      IN: Sequence number within the filter pipeline of - the filter for which information is sought. -
      unsigned int *flags -
      OUT: Bit vector specifying certain general properties - of the filter. -
      size_t *cd_nelmts -
      IN/OUT: Number of elements in cd_values -
      unsigned int *cd_values -
      OUT: Auxiliary data for the filter. -
      size_t namelen -
      IN: Anticipated number of characters in name. -
      char name[] -
      OUT: Name of the filter. -
      -
      Returns: -
      Returns the filter identification number if successful. - Otherwise returns H5Z_FILTER_ERROR (-1). -
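      Example:
      A minimal sketch listing every filter in a dataset creation property
      list; no auxiliary values are requested, so cd_values is passed as
      NULL with cd_nelmts reset to zero before each call.

          #include <stdio.h>
          #include <hdf5.h>

          void
          dump_pipeline(hid_t dcpl)
          {
              int          i, nfilters = H5Pget_nfilters(dcpl);
              unsigned int flags;
              size_t       cd_nelmts;
              char         name[64];

              for (i = 0; i < nfilters; i++) {
                  H5Z_filter_t id;

                  cd_nelmts = 0;          /* caller allocated no cd_values */
                  id = H5Pget_filter(dcpl, i, &flags, &cd_nelmts,
                                     NULL, sizeof(name), name);
                  printf("filter %d: id=%d flags=%#x name=%s\n",
                         i, (int)id, flags, name);
              }
          }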
      - - -
      -
      -
      Name: H5Pget_driver -
      Signature: -
      H5F_driver_t H5Pget_driver(hid_t plist)
      Purpose: -
      Returns a low-level file driver identifier. -
      Description: -
      H5Pget_driver returns the identifier of the - low-level file driver. Valid identifiers are: -
        -
      • H5F_LOW_STDIO (0) -
      • H5F_LOW_SEC2 (1) -
      • H5F_LOW_MPIO (2) -
      • H5F_LOW_CORE (3) -
      • H5F_LOW_SPLIT (4) -
      • H5F_LOW_FAMILY (5) -
      -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list. -
      -
      Returns: -
      Returns a low-level driver identifier if successful. - Otherwise returns H5F_LOW_ERROR (-1). -
      - - -
      -
      -
      Name: H5Pset_stdio -
      Signature: -
      herr_t H5Pset_stdio(hid_t plist) -
      Purpose: -
      Sets the low-level file driver to use the functions declared in stdio.h.
      Description: -
      H5Pset_stdio sets the low level file driver to use - the functions declared in the stdio.h file: fopen(), fseek() - or fseek64(), fread(), fwrite(), and fclose(). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_stdio -
      Signature: -
      herr_t H5Pget_stdio(hid_t plist) -
      Purpose: -
      Determines whether the file access property list is set to - the stdio driver. -
      Description: -
      H5Pget_stdio checks to determine whether the - file access property list is set to the stdio driver. - In the future, additional arguments may be added to this - function to match those added to H5Pset_stdio(). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list. -
      -
      Returns: -
      Returns SUCCEED (0) if the file access property list is set to the stdio driver. Otherwise returns FAIL (-1).
      - - -
      -
      -
      Name: H5Pset_sec2 -
      Signature: -
      herr_t H5Pset_sec2(hid_t plist)
      Purpose: -
      Sets the low-level file driver to use the functions declared in unistd.h.
      Description: -
      H5Pset_sec2 sets the low-level file driver to use - the functions declared - in the unistd.h file: open(), lseek() or lseek64(), read(), - write(), and close(). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_sec2 -
      Signature: -
      herr_t H5Pget_sec2(hid_t plist)
      Purpose: -
      Checks whether the file access property list is set to the sec2 driver.
      Description: -
      H5Pget_sec2 checks to determine whether the - file access property list is set to the sec2 driver. - In the future, additional arguments may be - added to this function to match those added to H5Pset_sec2(). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list. -
      -
      Returns: -
      Returns SUCCEED (0) if the file access property list is set to the sec2 driver. Otherwise returns FAIL (-1).
      - - -
      -
      -
      Name: H5Pset_core -
      Signature: -
      herr_t H5Pset_core(hid_t plist, - size_t increment - ) -
      Purpose: -
      Sets the low-level file driver to use malloc() and free(). -
      Description: -
      H5Pset_core sets the low-level file driver to use - malloc() and free(). - This driver is restricted to temporary files which are not - larger than the amount of virtual memory available. - The increment argument determines the file block size - and memory will be allocated in multiples of INCREMENT bytes. - A liberal increment results in fewer calls to - realloc() and probably less memory fragmentation. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of a file access property list. -
      size_t increment -
      IN: File block size in bytes. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_core
      Signature: -
      herr_t H5Pget_core(hid_t plist, - size_t *increment - ) -
      Purpose: -
      Determines whether the file access property list is set - to the core driver. -
      Description: -
      H5Pget_core checks to determine whether the - file access property list is set to the core driver. - On success, the block size is returned through the - increment if it is not the null pointer. - In the future, additional arguments may be added to this - function to match those added to H5Pset_core(). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file access property list. -
      size_t *increment -
      OUT: Pointer to a location to return the file block size (in bytes). -
      -
      Returns: -
      Returns SUCCEED (0) if the file access property list is set to the core driver. Otherwise returns FAIL (-1).
      - - -
      -
      -
      Name: H5Pset_split -
      Signature: -
      herr_t H5Pset_split(hid_t plist, - const char *meta_ext, - hid_t meta_plist, - const char *raw_ext, - hid_t raw_plist - ) -
      Purpose: -
      Sets the low-level driver to split meta data from raw data. -
      Description: -
      H5Pset_split sets the low-level driver to split meta data from raw data, storing meta data in one file and raw data in another file. The meta file will have a name formed by adding meta_ext (recommended default value: .meta) to the end of the base name and will be accessed according to meta_plist. The raw file will have a name formed by appending raw_ext (recommended default value: .raw) to the base name and will be accessed according to raw_plist. Additional parameters may be added to this function in the future.
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file access property list. -
      const char *meta_ext -
      IN: Name of the extension for the metafile filename. - Recommended default value: .meta. -
      hid_t meta_plist -
      IN: Identifier of the meta file access property list. -
      const char *raw_ext -
      IN: Name extension for the raw file filename. - Recommended default value: .raw. -
      hid_t raw_plist -
      IN: Identifier of the raw file access property list. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pget_split -
      Signature: -
      herr_t H5Pget_split(hid_t plist, - size_t meta_ext_size, - char *meta_ext, - hid_t *meta_properties, - size_t raw_ext_size, - char *raw_ext, - hid_t *raw_properties - ) -
      Purpose: -
      Determines whether the file access property list is set - to the split driver. -
      Description: -
      H5Pget_split checks to determine whether the file access property list is set to the split driver. On successful return, meta_properties and raw_properties will point to copies of the meta and raw access property lists, which should be closed by calling H5Pclose() when the application is finished with them; if the meta and/or raw file has no property list, then a negative value is returned for that property list identifier. Also, if meta_ext and/or raw_ext are non-null pointers, at most meta_ext_size or raw_ext_size characters of the meta or raw file name extension will be copied to the specified buffer. If the actual name is longer than what was requested, then the result will not be null terminated (similar to strncpy()). In the future, additional arguments may be added to this function to match those added to H5Pset_split().
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file access property list. -
      size_t meta_ext_size -
      IN: Number of characters of the meta file extension to be - copied to the meta_ext buffer. -
      char *meta_ext
      OUT: Meta file extension.
      hid_t *meta_properties -
      OUT: Pointer to a copy of the meta file access property list. -
      size_t raw_ext_size -
      IN: Number of characters of the raw file extension to be - copied to the raw_ext buffer. -
      char *raw_ext -
      OUT: Raw file extension. -
      hid_t *raw_properties -
      OUT: Pointer to a copy of the raw file access property list. -
      -
      Returns: -
      Returns SUCCEED (0) if the file access property list is set to the split driver. Otherwise returns FAIL (-1).
      - - -
      -
      -
      Name: H5Pset_family -
      Signature: -
      herr_t H5Pset_family(hid_t plist, - hsize_t memb_size, - hid_t memb_plist - ) -
      Purpose: -
      Sets the file access properties list to the family - driver. -
      Description: -
      H5Pset_family sets the file access properties - to use the family driver; any previously defined - driver properties are erased from the property list. - See File Families - in the HDF5 User's Guide for a discussion - of file families. -

      - Each member of the file family will use memb_plist - as its file access property list. -

      - The memb_size argument gives the logical size - in bytes of each family member; the actual size could be - smaller depending on whether the file contains holes. - The member size is only used when creating a new file or - truncating an existing file; otherwise the member size comes - from the size of the first member of the family being - opened. -

      - Note: If the size of the off_t type is four bytes, then the maximum family member size is usually 2^31-1 because the byte at offset 2,147,483,647 is generally inaccessible. -

      - Additional parameters may be added to this function in the - future. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file access property list. -
      hsize_t memb_size -
      IN: Logical size, in bytes, of each family member. -
      hid_t memb_plist -
      IN: Identifier of the file access property list - for each member of the family. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
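      A short sketch, using the signature documented above, of creating a file family with 1 GB members; the file name and member size are placeholders, and the printf-style integer field in the name is the usual convention for family member numbering.

          #include "hdf5.h"

          int main(void)
          {
              hid_t   fapl = H5Pcreate(H5P_FILE_ACCESS);
              hsize_t memb_size = (hsize_t)1024 * 1024 * 1024;   /* 1 GB per member */
              hid_t   file;

              /* Use the family driver; each member uses default access properties. */
              if (H5Pset_family(fapl, memb_size, H5P_DEFAULT) < 0)
                  return 1;

              /* The name conventionally contains a printf-style %d for the member index. */
              file = H5Fcreate("bigfile%05d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

              H5Pclose(fapl);
              if (file >= 0) H5Fclose(file);
              return 0;
          }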
      -
      -
      Name: H5Pget_family -
      Signature: -
herr_t H5Pget_family(hid_t plist, - hsize_t *memb_size, - hid_t *memb_plist - ) -
      Purpose: -
      Determines whether the file access property list - is set to the family driver. -
      Description: -
H5Pget_family checks whether the file access property list is set to the family driver. On successful return, memb_plist will point to a copy of the member access property list, which should be closed by calling H5Pclose() when the application is finished with it. If memb_size is non-null, it will contain the logical size in bytes of each family member. In the future, additional arguments may be added to this function to match those added to H5Pset_family(). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file access property list. -
      hsize_t *memb_size -
      OUT: Logical size, in bytes, of each family member. -
      hid_t *memb_plist -
      OUT: Identifier of the file access property list - for each member of the family. -
      -
      Returns: -
Returns SUCCEED (0) if the file access property list is set to the family driver. Otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_cache -
      Signature: -
      herr_t H5Pset_cache(hid_t plist, - int mdc_nelmts, - size_t rdcc_nbytes, - double rdcc_w0 - ) -
      Purpose: -
      Sets the number of elements in the meta data cache and the - total number of bytes in the raw data chunk cache. -
      Description: -
      H5Pset_cache sets the number of elements (objects) - in the meta data cache and the total number of bytes in the - raw data chunk cache. -

      - Sets or queries the meta data cache and raw data chunk cache parameters. The plist is a file access property list. The number of elements (objects) in the meta data cache is mdc_nelmts. The total size of the raw data chunk cache and the preemption policy are rdcc_nbytes and rdcc_w0, respectively. For H5Pget_cache() any (or all) of the pointer arguments may be null pointers. -

      - The rdcc_w0 value should be between 0 and 1 inclusive and indicates the degree to which chunks that have been fully read are favored for preemption. A value of zero means fully read chunks are treated no differently than other chunks (the preemption is strictly LRU), while a value of one means fully read chunks are always preempted before other chunks. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file access property list. -
      int mdc_nelmts -
      IN: Number of elements (objects) in the meta data cache. -
      size_t rdcc_nbytes -
      IN: Total size of the raw data chunk cache, in bytes. -
      double rdcc_w0 -
      IN: Preemption policy. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
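      A brief sketch of tuning the caches on a file access property list with the four-argument signature documented here (later library releases may change the argument list); the sizes chosen below are purely illustrative.

          #include "hdf5.h"

          static hid_t make_tuned_fapl(void)
          {
              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

              /* 1021 meta data objects, a 4 MB raw data chunk cache, and a
               * preemption policy of 0.75: fully read chunks are strongly
               * favored for preemption. */
              if (H5Pset_cache(fapl, 1021, (size_t)4 * 1024 * 1024, 0.75) < 0) {
                  H5Pclose(fapl);
                  return -1;
              }
              return fapl;
          }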
      -
      -
      Name: H5Pget_cache -
      Signature: -
      herr_t H5Pget_cache(hid_t plist, - int *mdc_nelmts, - size_t *rdcc_nbytes, - double *rdcc_w0 - ) -
      Purpose: -
Retrieves the maximum size of the meta data cache, the maximum size of the raw data chunk cache, and the rdcc_w0 value. -
      Description: -
      Retrieves the maximum possible number of elements in the meta - data cache and the maximum possible number of bytes and the - RDCC_W0 value in the raw data chunk cache. Any (or all) - arguments may be null pointers in which case the corresponding - datum is not returned. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier of the file access property list. -
      int *mdc_nelmts -
      IN/OUT: Number of elements (objects) in the meta data cache. -
      size_t *rdcc_nbytes -
      IN/OUT: Total size of the raw data chunk cache, in bytes. -
      double *rdcc_w0 -
      IN/OUT: Preemption policy. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Pset_buffer -
      Signature: -
      herr_t H5Pset_buffer(hid_t plist, - size_t size, - void *tconv, - void *bkg - ) -
      Purpose: -
      Sets type conversion and background buffers. -
      Description: -
Given a dataset transfer property list, H5Pset_buffer sets the maximum size for the type conversion buffer and background buffer and optionally supplies pointers to application-allocated buffers. If the buffer size is smaller than the entire amount of data being transferred between the application and the file, and a type conversion buffer or background buffer is required, then strip mining will be used. However, certain restrictions apply to the size of buffer which can be used for strip mining. For instance, when strip mining a 100x200x300 hyperslab of a simple data space, the buffer must be large enough to hold a 1x200x300 slab. -

      - If tconv and/or bkg are null pointers, - then buffers will be allocated and freed during the data transfer. -

      - The default maximum buffer size is 1 MB. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier for the dataset transfer property list. -
      size_t size -
      IN: Size for the type conversion and background buffers. -
void *tconv -
      IN: Pointer to application-allocated type conversion buffer. -
void *bkg -
      IN: Pointer to application-allocated background buffer. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
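      A sketch of supplying an application-allocated conversion buffer on a dataset transfer property list, following the signature above; the buffer size passed in must be large enough for one slab of the strip-mined selection, and the helper name is hypothetical.

          #include <stdlib.h>
          #include "hdf5.h"

          static hid_t make_xfer_plist(size_t bufsize, void **tconv_out)
          {
              hid_t xfer  = H5Pcreate(H5P_DATASET_XFER);
              void *tconv = malloc(bufsize);

              /* Pass NULL for the background buffer so the library allocates
               * one only if the transfer actually needs it. */
              if (tconv == NULL || H5Pset_buffer(xfer, bufsize, tconv, NULL) < 0) {
                  free(tconv);
                  H5Pclose(xfer);
                  return -1;
              }
              *tconv_out = tconv;   /* caller frees after the transfer completes */
              return xfer;
          }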
      -
      -
      Name: H5Pget_buffer -
      Signature: -
      size_t H5Pget_buffer(hid_t plist, - void **tconv, - void **bkg - ) -
      Purpose: -
      Reads buffer settings. -
      Description: -
      H5Pget_buffer reads values previously set - with H5Pset_buffer(). -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier for the dataset transfer property list. -
      void **tconv -
      OUT: Address of the pointer to application-allocated - type conversion buffer. -
      void **bkg -
      OUT: Address of the pointer to application-allocated - background buffer. -
      -
      Returns: -
      Returns buffer size if successful; - otherwise 0 on failure. -
      - - -
      -
      -
      Name: H5Pset_preserve -
      Signature: -
      herr_t H5Pset_preserve(hid_t plist, - hbool_t status - ) -
      Purpose: -
      Sets the dataset transfer property list status to TRUE or FALSE. -
      Description: -
      H5Pset_preserve sets the - dataset transfer property list status to TRUE or FALSE. -

      - When reading or writing compound datatypes, if the destination is partially initialized and the read/write is intended to initialize the remaining members, this property must be set to TRUE. Otherwise the I/O pipeline treats the destination data points as completely uninitialized. -

      Parameters: -
      -
      hid_t plist -
      IN: Identifier for the dataset transfer property list. -
      hbool_t status -
IN: Status for the dataset transfer property list (TRUE/FALSE). -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
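      The following sketch, which is not from the original manual, enables the preserve flag before partially writing a compound datatype; the dataset identifier dset, the memory datatype mem_type (describing only the members being written), and the buffer are assumed to exist.

          #include "hdf5.h"

          static herr_t write_some_members(hid_t dset, hid_t mem_type, const void *buf)
          {
              hid_t  xfer = H5Pcreate(H5P_DATASET_XFER);
              herr_t status;

              /* Preserve the members of each destination data point that this
               * write does not supply. */
              H5Pset_preserve(xfer, 1);
              status = H5Dwrite(dset, mem_type, H5S_ALL, H5S_ALL, xfer, buf);

              H5Pclose(xfer);
              return status;
          }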
      -
      -
      Name: H5Pget_preserve -
      Signature: -
      int H5Pget_preserve(hid_t plist) -
      Purpose: -
      Checks status of the dataset transfer property list. -
      Description: -
      H5Pget_preserve checks the status of the - dataset transfer property list. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier for the dataset transfer property list. -
      -
      Returns: -
      Returns TRUE or FALSE if successful; - otherwise FAIL (-1). -
      - - - - - - - - -
      -
      -
      Name: H5Pset_deflate -
      Signature: -
      herr_t H5Pset_deflate(hid_t plist, - int level - ) -
      Purpose: -
      Sets compression method and compression level. -
      Description: -
H5Pset_deflate sets the compression method for a dataset creation property list to H5D_COMPRESS_DEFLATE and the compression level to level, which should be a value from zero to nine, inclusive. Lower compression levels are faster but result in less compression. This is the same algorithm as used by the GNU gzip program. -
      Parameters: -
      -
      hid_t plist -
      IN: Identifier for the dataset creation property list. -
      int level -
      IN: Compression level. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - - - - -
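      A sketch of enabling deflate compression on a dataset creation property list; compression requires a chunked layout (set with H5Pset_chunk, documented elsewhere in the H5P section), and the chunk dimensions shown are placeholders.

          #include "hdf5.h"

          static hid_t make_compressed_dcpl(void)
          {
              hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
              hsize_t chunk[2] = {64, 64};

              H5Pset_chunk(dcpl, 2, chunk);   /* compression requires chunked layout */
              H5Pset_deflate(dcpl, 6);        /* level 6: moderate speed/size tradeoff */
              return dcpl;                    /* caller closes with H5Pclose */
          }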
      -
      -
      - - -
      -HDF Help Desk - -
      -Last modified: 25 August 1998 - - - - diff --git a/doc/src/RM_H5R.html b/doc/src/RM_H5R.html deleted file mode 100644 index 4e1fd7c..0000000 --- a/doc/src/RM_H5R.html +++ /dev/null @@ -1,248 +0,0 @@ - - -HDF5/H5R Draft API Specification - - - - -
      -
      -
      - -
      -

      H5R: Ragged Array Interface

      -
      - -

      Ragged Array API Functions

      - - -
      - -The H5R Interface is strictly experimental at this time; the interface may change dramatically or support for ragged arrays may be unavailable in future releases. As a result, future releases may be unable to retrieve data stored with this interface. -

      Use these functions at your own risk!
      -Do not create any archives using this interface!
      -
      -
      -

      -These functions enable the user to store and retrieve data in ragged arrays. - - - - -
      - -       - -
      - -

      - - - -


      -
      -
      Name: H5Rcreate -
      Signature: -
      H5Rcreate( , - , - - ) -
      Purpose: -
      -
      Description: -
      H5Rcreate -
      Parameters: -
      -
      -
      -
      -
      -
      -
      -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Ropen -
      Signature: -
      H5Ropen( , - , - - ) -
      Purpose: -
      -
      Description: -
      H5Ropen -
      Parameters: -
      -
      -
      -
      -
      -
      -
      -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Rclose -
      Signature: -
      H5Rclose( , - , - - ) -
      Purpose: -
      -
      Description: -
      H5Rclose -
      Parameters: -
      -
      -
      -
      -
      -
      -
      -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Rwrite -
      Signature: -
      H5Rwrite( , - , - - ) -
      Purpose: -
      -
      Description: -
      H5Rwrite -

      - Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

      Parameters: -
      -
      -
      -
      -
      -
      -
      -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Rread -
      Signature: -
      H5Rread( , - , - - ) -
      Purpose: -
      -
      Description: -
      H5Rread -

      - Datatype conversion takes place at the time of a read or write - and is automatic. See the - Data Conversion - section of The Data Type Interface (H5T) in the - HDF5 User's Guide for a discussion of - data conversion, including the range of conversions currently - supported by the HDF5 libraries. -

      Parameters: -
      -
      -
      -
      -
      -
      -
      -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 1 September 1998 - - - diff --git a/doc/src/RM_H5S.html b/doc/src/RM_H5S.html deleted file mode 100644 index ae69b20..0000000 --- a/doc/src/RM_H5S.html +++ /dev/null @@ -1,749 +0,0 @@ - - -HDF5/H5S Draft API Specification - - - - -
      -
      -
      - -
      -

      H5S: Dataspace Interface

      -
      - -

      Dataspace Object API Functions

      - -These functions create and manipulate the dataspace in which to store the -elements of a dataset. - - - -
      - -       - -       - -
      - -

      -The following H5S functions are included in the HDF5 specification, but have not yet been implemented. They are described in The Dataspace Interface (H5S) section of the HDF5 User's Guide. - -
      -
        -
      • H5Scommit -
      • H5Sis_subspace -
      • H5Slock -
      • H5Sopen -
      -
             -
        -
      • H5Sselect_name -
      • H5Sselect_op -
      • H5Sselect_order -
      -
             -
        -
      • H5Ssubspace -
      • H5Ssubspace_name -
      • H5Ssubspace_location -
      -
      - - -


      -
      -
      Name: H5Screate -
      Signature: -
      hid_t H5Screate(H5S_class_t type) -
      Purpose: -
      Creates a new dataspace of a specified type. -
      Description: -
      H5Screate creates a new dataspace of a particular - type. - The types currently supported are H5S_SCALAR, - H5S_SIMPLE, and H5S_NONE; - others are planned to be added later. The H5S_NONE - dataspace can only hold a selection, not an extent. -
      Parameters: -
      -
      H5S_class_t type -
      The type of dataspace to be created. -
      -
      Returns: -
      Returns a dataspace identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Screate_simple -
      Signature: -
      hid_t H5Screate_simple(int rank, - const hsize_t * dims, - const hsize_t * maxdims - ) -
      Purpose: -
      Creates a new simple data space and opens it for access. -
      Description: -
H5Screate_simple creates a new simple data space and opens it for access. The rank is the number of dimensions used in the dataspace. The dims argument is the current size of the dataspace and the maxdims argument is the upper limit on the size. maxdims may be the null pointer, in which case the upper limit is the same as dims. If an element of maxdims is zero, the corresponding dimension is unlimited; otherwise no element of maxdims should be smaller than the corresponding element of dims. The dataspace identifier returned from this function should be released with H5Sclose or resource leaks will occur. -
      Parameters: -
      -
      int rank -
      Number of dimensions of dataspace. -
      const hsize_t * dims -
      An array of the size of each dimension. -
      const hsize_t * maxdims -
      An array of the maximum size of each dimension. -
      -
      Returns: -
      Returns a dataspace identifier if successful; - otherwise FAIL (-1). -
      - - -
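      A sketch of creating a 2-dimensional simple dataspace whose first dimension is unlimited; the sizes and helper name are illustrative.

          #include "hdf5.h"

          static hid_t make_space(void)
          {
              hsize_t dims[2]    = {100, 20};             /* current size */
              hsize_t maxdims[2] = {H5S_UNLIMITED, 20};   /* first dimension may grow */

              return H5Screate_simple(2, dims, maxdims);  /* caller must H5Sclose() it */
          }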
      -
      -
      Name: H5Scopy -
      Signature: -
      hid_t H5Scopy(hid_t space_id - ) -
      Purpose: -
      Creates an exact copy of a dataspace. -
      Description: -
      H5Scopy creates a new dataspace which is an exact - copy of the dataspace identified by space_id. - The dataspace identifier returned from this function should be - released with H5Sclose or resource leaks will occur. -
      Parameters: -
      -
      hid_t space_id -
      Identifier of dataspace to copy. -
      -
      Returns: -
      Returns a dataspace identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sselect_elements -
      Signature: -
herr_t H5Sselect_elements(hid_t space_id, - H5S_seloper_t op, - const size_t num_elements, - const hssize_t *coord[ ] - ) -
      Purpose: -
      Selects array elements to be included in the selection for a dataspace. -
      Description: -
H5Sselect_elements selects array elements to be included in the selection for the space_id dataspace. The number of elements selected is given by num_elements. The coord array is a two-dimensional array of size dataspace rank by num_elements (i.e., a list of coordinates in the array). The order of the element coordinates in the coord array also specifies the order in which the array elements are iterated through when I/O is performed. Duplicate coordinate locations are not checked. -

      - The selection operator op determines how the - new selection is to be combined with the previously existing - selection for the dataspace. Currently, only the - H5S_SELECT_SET operator is supported, which - replaces the existing selection with the parameters from - this call. When operators other than H5S_SELECT_SET - are used to combine a new selection with an existing selection, - the selection ordering is reset to 'C' array ordering. -

      Parameters: -
      -
      hid_t space_id -
      Identifier of the dataspace. -
H5S_seloper_t op -
Operator specifying how the new selection is to be combined with the existing selection for the dataspace. -
      const size_t num_elements -
      Number of elements to be selected. -
      const hssize_t *coord[ ] -
      A 2-dimensional array specifying the coordinates of the - elements being selected. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
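      A sketch of a point selection in a rank-2 dataspace, following the coordinate layout described above (one row of coordinates per selected element); the cast on the coordinate array follows the historical usage for this signature, and coordinate types have differed across releases.

          #include "hdf5.h"

          static herr_t select_points(hid_t space)
          {
              /* Three (row, column) coordinates, one row per selected element;
               * the cast passes the array as the flat coordinate list expected
               * by the signature documented above. */
              hssize_t coord[3][2] = { {0, 0}, {3, 4}, {7, 1} };

              return H5Sselect_elements(space, H5S_SELECT_SET, 3,
                                        (const hssize_t **)coord);
          }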
      -
      -
      Name: H5Sselect_all -
      Signature: -
      herr_t H5Sselect_all(hid_t space_id) -
      Purpose: -
      Selects the entire dataspace. -
      Description: -
      H5Sselect_all selects the entire extent - of the dataspace space_id. -

      - More specifically, H5Sselect_all selects the special H5S_SELECT_ALL region for the dataspace space_id. H5S_SELECT_ALL selects the entire dataspace for any dataspace it is applied to. -

      Parameters: -
      -
      hid_t space_id -
      IN: The identifier for the dataspace in which the - selection is being made. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sselect_none -
      Signature: -
      herr_t H5Sselect_none(hid_t space_id) -
      Purpose: -
      Resets the selection region to include no elements. -
      Description: -
      H5Sselect_none resets the selection region - for the dataspace space_id to include no elements. -
      Parameters: -
      -
      hid_t space_id -
      IN: The identifier for the dataspace in which the - selection is being reset. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sselect_valid -
      Signature: -
      hbool_t H5Sselect_valid(hid_t space_id) -
      Purpose: -
      Verifies that the selection is within the extent of the dataspace. -
      Description: -
      H5Sselect_valid verifies that the selection - for the dataspace space_id is within the extent - of the dataspace if the current offset for the dataspace is used. -
      Parameters: -
      -
      hid_t space_id -
The identifier for the dataspace being queried. -
      -
      Returns: -
      Returns TRUE if the selection is contained within - the extent and FALSE if it is not. - Returns FAIL (-1) on error conditions - such as the selection or extent not being defined. -
      - - -
      -
      -
      Name: H5Sget_simple_extent_npoints -
      Signature: -
      hsize_t H5Sget_simple_extent_npoints(hid_t space_id) -
      Purpose: -
      Determines the number of elements in a dataspace. -
      Description: -
      H5Sget_simple_extent_npoints determines the number of elements - in a dataspace. For example, a simple 3-dimensional dataspace - with dimensions 2, 3, and 4 would have 24 elements. -
      Parameters: -
      -
      hid_t space_id -
      ID of the dataspace object to query -
      -
      Returns: -
      Returns the number of elements in the dataspace if successful; - otherwise returns 0. -
      - - -
      -
      -
      Name: H5Sget_select_npoints -
      Signature: -
      hsize_t H5Sget_select_npoints(hid_t space_id) -
      Purpose: -
      Determines the number of elements in a dataspace. -
      Description: -
      H5Sget_select_npoints determines the number of elements - in the current selection of a dataspace. -
      Parameters: -
      -
      hid_t space_id -
      Dataspace identifier. -
      -
      Returns: -
      Returns the number of elements in the selection if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sget_simple_extent_ndims -
      Signature: -
      int H5Sget_simple_extent_ndims(hid_t space_id) -
      Purpose: -
      Determines the dimensionality of a dataspace. -
      Description: -
      H5Sget_simple_extent_ndims determines the dimensionality (or rank) - of a dataspace. -
      Parameters: -
      -
      hid_t space_id -
      Identifier of the dataspace -
      -
      Returns: -
      Returns the number of dimensions in the dataspace if successful; - otherwise FAIL (-1) -
      - - -
      -
      -
      Name: H5Sget_simple_extent_dims -
      Signature: -
      int H5Sget_simple_extent_dims(hid_t space_id, - hsize_t *dims, - hsize_t *maxdims - ) -
      Purpose: -
      Retrieves dataspace dimension size and maximum size. -
      Description: -
      H5Sget_simple_extent_dims returns the size and maximum sizes - of each dimension of a dataspace through the dims - and maxdims parameters. -
      Parameters: -
      -
      hid_t space_id -
      IN: Identifier of the dataspace object to query -
      hsize_t *dims -
      OUT: Pointer to array to store the size of each dimension. -
      hsize_t *maxdims -
      OUT: Pointer to array to store the maximum size of each dimension. -
      -
      Returns: -
      Returns the number of dimensions in the dataspace if successful; - otherwise FAIL (-1). -
      - - -
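      A sketch, assuming the rank does not exceed the fixed-size arrays used here, of querying a dataspace's rank and per-dimension current and maximum sizes.

          #include <stdio.h>
          #include "hdf5.h"

          static void print_extent(hid_t space)
          {
              hsize_t dims[32], maxdims[32];   /* 32 covers any practical rank */
              int     rank = H5Sget_simple_extent_ndims(space);
              int     i;

              if (rank < 0 || H5Sget_simple_extent_dims(space, dims, maxdims) < 0)
                  return;

              for (i = 0; i < rank; i++)
                  printf("dim %d: size %lu (max %lu)\n", i,
                         (unsigned long)dims[i], (unsigned long)maxdims[i]);
          }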
      -
      -
      Name: H5Sget_space_type -
      Signature: -
      H5S_class_t H5Sget_space_type(hid_t space_id) -
      Purpose: -
      Determine the current class of a dataspace. -
      Description: -
      H5Sget_space_type queries a dataspace to determine the - current class of a dataspace. -

      - The function returns a class name, one of the following: - H5S_SCALAR, - H5S_SIMPLE, or - H5S_NONE. -

      Parameters: -
      -
      hid_t space_id -
      Dataspace identifier. -
      -
      Returns: -
      Returns a dataspace class name if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sset_extent_simple -
      Signature: -
      herr_t H5Sset_extent_simple(hid_t space_id, - int rank, - const hsize_t *current_size, - const hsize_t *maximum_size - ) -
      Purpose: -
      Sets or resets the size of an existing dataspace. -
      Description: -
      H5Sset_extent_simple sets or resets the size of - an existing dataspace. -

      - rank is the dimensionality, or number of - dimensions, of the dataspace. -

      - current_size is an array of size rank - which contains the new size of each dimension in the dataspace. - maximum_size is an array of size rank - which contains the maximum size of each dimension in the - dataspace. -

      - Any previous extent is removed from the dataspace, the dataspace - type is set to H5S_SIMPLE, and the extent is set as - specified. -

      Parameters: -
      -
      hid_t space_id -
      Dataspace identifier. -
      int rank -
      Rank, or dimensionality, of the dataspace. -
      const hsize_t *current_size -
      Array containing current size of dataspace. -
      const hsize_t *maximum_size -
      Array containing maximum size of dataspace. -
      -
      Returns: -
      Returns a dataspace identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sis_simple -
      Signature: -
      hbool_t H5Sis_simple(hid_t space_id) -
      Purpose: -
      Determines whether a dataspace is a simple dataspace. -
      Description: -
H5Sis_simple determines whether a dataspace is a simple dataspace. [Currently, all dataspace objects are simple dataspaces; complex dataspace support will be added in the future.] -
      Parameters: -
      -
      hid_t space_id -
      Identifier of the dataspace to query -
      -
      Returns: -
Returns TRUE or FALSE if successful; otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Soffset_simple -
      Signature: -
      herr_t H5Soffset_simple(hid_t space_id, - const hssize_t *offset - ) -
      Purpose: -
      Sets the offset of a simple dataspace. -
      Description: -
      H5Soffset_simple sets the offset of a - simple dataspace space_id. The offset - array must be the same number of elements as the number of - dimensions for the dataspace. If the offset - array is set to NULL, the offset for the dataspace - is reset to 0. -

      - This function allows the same shaped selection to be moved - to different locations within a dataspace without requiring it - to be redefined. -

      Parameters: -
      -
      hid_t space_id -
      IN: The identifier for the dataspace object to reset. -
      const hssize_t *offset -
      IN: The offset at which to position the selection. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
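      A sketch of sliding an existing selection to a new origin with H5Soffset_simple rather than redefining it; the offsets shown are illustrative.

          #include "hdf5.h"

          static herr_t shift_selection(hid_t space)
          {
              /* Move the whole selection 5 elements along the first dimension
               * and 10 along the second; the selection shape is unchanged. */
              hssize_t offset[2] = {5, 10};

              return H5Soffset_simple(space, offset);
          }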
      -
      -
      Name: H5Sextent_class -
      Signature: -
      H5S_class_t H5Sextent_class(hid_t space_id) -
      Purpose: -
      Determine the current class of a dataspace. -
      Description: -
      H5Sextent_class queries a dataspace to determine the - current class of a dataspace. -

      - The function returns a class name, one of the following: H5S_SCALAR or H5S_SIMPLE. -

      Parameters: -
      -
      hid_t space_id -
      Dataspace identifier. -
      -
      Returns: -
      Returns a dataspace class name if successful; - otherwise H5S_NO_CLASS (-1). -
      - - -
      -
      -
      Name: H5Sextent_copy -
      Signature: -
      herr_t H5Sextent_copy(hid_t dest_space_id, - hid_t source_space_id - ) -
      Purpose: -
      Copies the extent of a dataspace. -
      Description: -
      H5Sextent_copy copies the extent from - source_space_id to dest_space_id. - This action may change the type of the dataspace. -
      Parameters: -
      -
      hid_t dest_space_id -
IN: The identifier for the dataspace to which the extent is copied. -
      hid_t source_space_id -
IN: The identifier for the dataspace from which the extent is copied. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sset_extent_none -
      Signature: -
      herr_t H5Sset_extent_none(hid_t space_id) -
      Purpose: -
      Removes the extent from a dataspace. -
      Description: -
      H5Sset_extent_none removes the extent from - a dataspace and sets the type to H5S_NONE. -
      Parameters: -
      -
      hid_t space_id -
      The identifier for the dataspace from which - the extent is to be removed. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Sselect_hyperslab -
      Signature: -
herr_t H5Sselect_hyperslab(hid_t space_id, - H5S_seloper_t op, - const hssize_t *start, - const hsize_t *stride, - const hsize_t *count, - const hsize_t *block - ) -
      Purpose: -
      Selects a hyperslab region to add to the current selected region. -
      Description: -
      H5Sselect_hyperslab selects a hyperslab region - to add to the current selected region for the dataspace - specified by space_id. -

      - The start, stride, count, - and block arrays must be the same size as the rank - of the dataspace. -

      - The selection operator op determines how the new - selection is to be combined with the already existing selection - for the dataspace. -

      - Currently, only the H5S_SELECT_SET operator is - supported; it replaces the existing selection with the - parameters from this call. Overlapping blocks are not - supported with the H5S_SELECT_SET operator. -

      -The start array determines the starting coordinates -of the hyperslab -to select. -

      -The stride array chooses array locations -from the dataspace -with each value in the stride array determining how -many elements to move -in each dimension. Setting a value in the stride -array to 1 moves to -each element in that dimension of the dataspace; setting a value of 2 in a -location in the stride array moves to every other -element in that -dimension of the dataspace. In other words, the stride -determines the -number of elements to move from the start location -in each dimension. -Stride values of 0 are not allowed. If the stride -parameter is NULL, -a contiguous hyperslab is selected (as if each value in the -stride array -was set to all 1's). -

      -The count array determines how many blocks to -select from the dataspace, in each dimension. -

      -The block array determines -the size of the element block selected from the dataspace. -If the block -parameter is set to NULL, the block size defaults -to a single element -in each dimension (as if the block array was set to all 1's). -

      -For example, in a 2-dimensional dataspace, setting -start to [1,1], -stride to [4,4], count to [3,7], and -block to [2,2] selects -21 2x2 blocks of array elements starting with location (1,1) and selecting -blocks at locations (1,1), (5,1), (9,1), (1,5), (5,5), etc. -

      -Regions selected with this function call default to C order iteration when -I/O is performed. -

      Parameters: -
      -
      hid_t space_id -
      IN: Identifier of dataspace selection to modify -
      H5S_seloper_t op -
      IN: Operation to perform on current selection. -
      const hssize_t *start -
      IN: Offset of start of hyperslab -
      const hsize_t *count -
      IN: Number of blocks included in hyperslab. -
      const hsize_t *stride -
      IN: Hyperslab stride. -
      const hsize_t *block -
      IN: Size of block in hyperslab. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
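      A sketch of the worked example above: in a 2-dimensional dataspace, selecting 21 2x2 blocks starting at (1,1) with a stride of 4 in each dimension; the argument types follow the signature shown here and may differ in later releases.

          #include "hdf5.h"

          static herr_t select_blocks(hid_t space)
          {
              hssize_t start[2]  = {1, 1};
              hsize_t  stride[2] = {4, 4};
              hsize_t  count[2]  = {3, 7};   /* 3 x 7 = 21 blocks */
              hsize_t  block[2]  = {2, 2};   /* each block is 2 x 2 elements */

              return H5Sselect_hyperslab(space, H5S_SELECT_SET,
                                         start, stride, count, block);
          }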
      -
      -
      Name: H5Sclose -
      Signature: -
      herr_t H5Sclose(hid_t space_id - ) -
      Purpose: -
      Releases and terminates access to a dataspace. -
      Description: -
      H5Sclose releases a dataspace. - Further access through the dataspace identifier is illegal. - Failure to release a dataspace with this call will - result in resource leaks. -
      Parameters: -
      -
      hid_t space_id -
      Identifier of dataspace to release. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 1 September 1998 - - - diff --git a/doc/src/RM_H5T.html b/doc/src/RM_H5T.html deleted file mode 100644 index 086b9cc..0000000 --- a/doc/src/RM_H5T.html +++ /dev/null @@ -1,1769 +0,0 @@ - - -HDF5/H5T Draft API Specification - - - - -
      -
      -
      - -
      -

      H5T: Datatype Interface

      -
      - -

      Datatype Object API Functions

      - -These functions create and manipulate the datatype which describes elements -of a dataset. - - - -
      -General Datatype Operations -
      • H5Tcreate   • H5Topen   • H5Tcommit   • H5Tcommitted -
      • H5Tcopy   • H5Tequal   • H5Tlock   • H5Tclose -

      -Atomic Datatype Properties -
      • H5Tget_class   • H5Tget_size   • H5Tset_size   • H5Tget_order   • H5Tset_order -
      • H5Tget_precision   • H5Tset_precision   • H5Tget_offset   • H5Tset_offset -
      • H5Tget_pad   • H5Tset_pad   • H5Tget_sign   • H5Tset_sign -
      • H5Tget_fields   • H5Tset_fields   • H5Tget_ebias   • H5Tset_ebias -
      • H5Tget_norm   • H5Tset_norm   • H5Tget_inpad   • H5Tset_inpad -
      • H5Tget_cset   • H5Tset_cset   • H5Tget_strpad   • H5Tset_strpad -

      -Properties of Compound Types -
      • H5Tget_class   • H5Tget_size   • H5Tget_nmembers   • H5Tget_member_name -
      • H5Tget_member_offset   • H5Tget_member_dims   • H5Tget_member_type -
      • H5Tinsert   • H5Tpack   • H5Tinsert_array -

      -Conversion Functions -
      • H5Tconvert   • H5Tfind   • H5Tset_overflow   • H5Tget_overflow -
      • H5Tregister_hard   • H5Tregister_soft   • H5Tunregister -

      -The Datatype interface, H5T, provides a mechanism to describe the storage format of individual data points of a data set and is designed so that new features can be added without disrupting applications that use the datatype interface. A dataset (the H5D interface) is composed of a collection of raw data points of homogeneous type, organized according to the data space (the H5S interface). - -

      -A datatype is a collection of datatype properties, all of - which can be stored on disk, and which when taken as a whole, - provide complete information for data conversion to or from that - datatype. The interface provides functions to set and query - properties of a datatype. - -

      -A data point is an instance of a datatype, - which is an instance of a type class. We have defined - a set of type classes and properties which can be extended at a - later time. The atomic type classes are those which describe - types which cannot be decomposed at the datatype interface - level; all other classes are compound. - -

      -See The Datatype Interface (H5T) -in the HDF5 User's Guide for further information, including a complete list of all supported datatypes. - - -


      -
      -
      Name: H5Topen -
      Signature: -
hid_t H5Topen(hid_t loc_id, - const char * name - ) -
      Purpose: -
      Opens a named datatype. -
      Description: -
      H5Topen opens a named datatype at the location - specified by loc_id and returns an identifier - for the datatype. loc_id is either a file or - group identifier. The identifier should eventually be closed - by calling H5Tclose() to release resources. -
      Parameters: -
      -
      hid_t loc_id -
      A file, group, or datatype identifier. -
      const char * name -
      A datatype name. -
      -
      Returns: -
      Returns a named datatype identifier if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tcommit -
      Signature: -
herr_t H5Tcommit(hid_t loc_id, - const char * name, - hid_t type - ) -
      Purpose: -
      Commits a transient datatype to a file, creating a new named datatype. -
      Description: -
H5Tcommit commits a transient datatype (not immutable) to a file, turning it into a named datatype. The loc_id is either a file or group identifier which, when combined with name, refers to a new named datatype. -
      Parameters: -
      -
      hid_t loc_id -
      A file or group identifier. -
      const char * name -
      A datatype name. -
      hid_t type -
      A datatype identifier. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
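      A sketch of committing a transient datatype as a named datatype, using the three-argument form documented here (the call has changed in later releases); the file identifier and datatype name are placeholders.

          #include "hdf5.h"

          static herr_t save_named_type(hid_t file)
          {
              hid_t  type   = H5Tcopy(H5T_NATIVE_INT);              /* transient copy */
              herr_t status = H5Tcommit(file, "/types/my_int", type);

              H5Tclose(type);   /* the named datatype remains in the file */
              return status;
          }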
      -
      -
      Name: H5Tcommitted -
      Signature: -
hbool_t H5Tcommitted(hid_t type) -
      Purpose: -
      Determines whether a datatype is a named type or a transient type. -
      Description: -
      H5Tcommitted queries a type to determine whether - the type specified by the type identifier - is a named type or a transient type. If this function returns - a positive value, then the type is named (that is, it has been - committed, perhaps by some other application). Datasets which - return committed datatypes with H5Dget_type() are - able to share the datatype with other datasets in the same file. -
      Parameters: -
      -
      hid_t type -
      Datatype identifier. -
      -
      Returns: -
      The successful return values are TRUE if committed, else FALSE. - Otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Tinsert_array -
      Signature: -
herr_t H5Tinsert_array(hid_t parent_id, - const char *name, - size_t offset, - int ndims, - const size_t *dim, - const int *perm, - hid_t member_id - ) -
      Purpose: -
      Adds an array member to a compound datatype. -
      Description: -
      H5Tinsert_array adds a new member to the - compound datatype parent_id. - The member is an array with ndims dimensionality - and the size of the array is dim. - The new member's name, name, must be unique - within the compound datatype. - The offset argument defines the start of the - member in an instance of the compound datatype and - member_id is the type identifier of the new member. - The total member size should be relatively small. -
      Parameters: -
      -
      hid_t parent_id -
      Identifier of the parent compound datatype. -
      const char *name -
      Name of new member. -
      size_t offset -
      Offset to start of new member within compound datatype. -
      int ndims -
      Dimensionality of new member. -
      const size_t *dim -
      Size of new member array. -
      const int *perm -
      Pointer to buffer to store the permutation vector of - the field. -
      hid_t member_id -
      Identifier of the datatype of the new member. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tfind -
      Signature: -
      H5T_conv_t H5Tfind(hid_t src_id, - hid_t dst_id, - H5T_cdata_t **pcdata - ) -
      Purpose: -
      Finds a conversion function. -
      Description: -
      H5Tfind finds a conversion function that can - handle a conversion from type src_id to type - dst_id. - The pcdata argument is a pointer to a pointer - to type conversion data which was created and initialized - by the soft type conversion function of this path when the - conversion function was installed on the path. -
      Parameters: -
      -
      hid_t src_id -
      Identifier for the source datatype. -
      hid_t dst_id -
      Identifier for the destination datatype. -
      H5T_cdata_t **pcdata -
      Pointer to type conversion data. -
      -
      Returns: -
      Returns a pointer to a suitable conversion function if successful. - Otherwise returns NULL. -
      - - -
      -
      -
      Name: H5Tconvert -
      Signature: -
      herr_t H5Tconvert(hid_t src_id, - hid_t dst_id, - size_t nelmts, - void *buf, - void *background - ) -
      Purpose: -
Converts data from one specified datatype to another. -
      Description: -
      H5Tconvert converts nelmts elements - from the type specified by the src_id identifier - to type dst_id. - The source elements are packed in buf and on return - the destination will be packed in buf. - That is, the conversion is performed in place. - The optional background buffer is an array of nelmts - values of destination type which are merged with the converted - values to fill in cracks (for instance, background - might be an array of structs with the a and - b fields already initialized and the conversion - of buf supplies the c and d - field values). -
      Parameters: -
      -
      hid_t src_id -
      Identifier for the source datatype. -
      hid_t dst_id -
      Identifier for the destination datatype. -
      size_t nelmts -
      Size of array buf. -
      void *buf -
      Array containing pre- and post-conversion values. -
      void *background -
      Optional background buffer. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
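      A sketch of an in-place conversion between two same-sized integer types using the five-argument signature documented here (later releases add a transfer property list argument); no background buffer is needed in this case, and the helper name is hypothetical.

          #include "hdf5.h"

          static herr_t to_big_endian(void *buf, size_t nelmts)
          {
              /* buf holds nelmts 32-bit little-endian integers; they are
               * converted to 32-bit big-endian integers in place. */
              return H5Tconvert(H5T_STD_I32LE, H5T_STD_I32BE, nelmts, buf, NULL);
          }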
      -
      -
      Name: H5Tset_overflow -
      Signature: -
      herr_t H5Tset_overflow(H5T_overflow_t func) -
      Purpose: -
      Sets the overflow handler to a specified function. -
      Description: -
      H5Tset_overflow sets the overflow handler - to be the function specified by func. - func will be called for all datatype conversions that - result in an overflow. -

      - See the definition of H5T_overflow_t in - H5Tpublic.h for documentation - of arguments and return values. - The prototype for H5T_overflow_t is as follows:
      - herr_t (*H5T_overflow_t)(hid_t src_id, hid_t dst_id, - void *src_buf, void *dst_buf); - -

      - The NULL pointer may be passed to remove the overflow handler. -

      Parameters: -
      -
      H5T_overflow_t func -
      Overflow function. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_overflow -
Signature: -
      H5T_overflow_t H5Tget_overflow(void) -
      Purpose: -
      Returns a pointer to the current global overflow function. -
      Description: -
H5Tget_overflow returns a pointer to the current global overflow function. This is an application-defined function that is called whenever a datatype conversion causes an overflow. -
      Parameters: -
      -
      None. -
      -
      Returns: -
      Returns a pointer to an application-defined function if successful. - Otherwise returns NULL; this can happen if no overflow handling - function is registered. -
      - - -
      -
      -
      Name: H5Tcreate -
      Signature: -
hid_t H5Tcreate(H5T_class_t class, - size_t size - ) -
      Purpose: -
Creates a new datatype. -
Description: -
H5Tcreate creates a new datatype of the specified class with the specified number of bytes. Currently, only the H5T_COMPOUND datatype class is supported with this function. Use H5Tcopy to create integer or floating-point datatypes. The datatype identifier returned from this function should be released with H5Tclose or resource leaks will result. -
      Parameters: -
      -
      H5T_class_t class -
      Class of datatype to create. -
      size_t size -
      The number of bytes in the datatype to create. -
      -
      Returns: -
      Returns datatype identifier if successful; - otherwise FAIL (-1). -
      - - -
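      A sketch of building a compound datatype that matches a C struct; H5Tinsert (listed above among the compound-type functions) adds each member, and HOFFSET is the usual offset macro. The struct and helper names are illustrative.

          #include "hdf5.h"

          typedef struct {
              int    serial;
              double temperature;
          } record_t;

          static hid_t make_record_type(void)
          {
              hid_t type = H5Tcreate(H5T_COMPOUND, sizeof(record_t));

              H5Tinsert(type, "serial",      HOFFSET(record_t, serial),      H5T_NATIVE_INT);
              H5Tinsert(type, "temperature", HOFFSET(record_t, temperature), H5T_NATIVE_DOUBLE);
              return type;   /* release with H5Tclose when finished */
          }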
      -
      -
      Name: H5Tcopy -
      Signature: -
      hid_t H5Tcopy(hid_t type_id) -
      Purpose: -
      Copies an existing datatype. -
      Description: -
      H5Tcopy copies an existing datatype. - The returned type is always transient and unlocked. -

      - The type_id argument can be either a datatype - identifier, a predefined datatype (defined in - H5Tpublic.h), or a dataset identifier. - If type_id is a dataset identifier instead of a - datatype identifier, then this function returns a transient, - modifiable datatype which is a copy of the dataset's datatype. -

      - The datatype identifier returned should be released with - H5Tclose or resource leaks will occur. - -

      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to copy. Can be a datatype - identifier, a predefined datatype (defined in - H5Tpublic.h), or a dataset identifier. -
      -
      Returns: -
      Returns a datatype identifier if successful; - otherwise FAIL (-1) -
      - - -
      -
      -
      Name: H5Tequal -
      Signature: -
hbool_t H5Tequal(hid_t type_id1, - hid_t type_id2 - ) -
      Purpose: -
      Determines whether two datatype identifiers refer to the same datatype. -
      Description: -
      H5Tequal determines whether two datatype identifiers - refer to the same datatype. -
      Parameters: -
      -
      hid_t type_id1 -
      Identifier of datatype to compare. -
      hid_t type_id2 -
      Identifier of datatype to compare. -
      -
      Returns: -
      When successful, returns TRUE if the datatype identifiers - refer to the same datatype, else FALSE. - Otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Tlock -
      Signature: -
      herr_t H5Tlock(hid_t type_id - ) -
      Purpose: -
      Locks a datatype. -
      Description: -
H5Tlock locks the datatype specified by the type_id identifier, making it read-only and non-destructible. This is normally done by the library for predefined datatypes so the application does not inadvertently change or delete a predefined type. Once a datatype is locked it can never be unlocked. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to lock. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_class -
      Signature: -
      H5T_class_t H5Tget_class(hid_t type_id - ) -
      Purpose: -
      Returns the datatype class identifier. -
      Description: -
      H5Tget_class returns the datatype class identifier. -

      - Valid class identifiers, as defined in H5Tpublic.h, are: -

      • H5T_INTEGER (0) -
      • H5T_FLOAT (1) -
      • H5T_TIME (2) -
      • H5T_STRING (3) -
      • H5T_BITFIELD (4) -
      • H5T_OPAQUE (5) -
      • H5T_COMPOUND (6) -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns datatype class identifier if successful; - otherwise H5T_NO_CLASS (-1). -
      - - -
      -
      -
      Name: H5Tget_size -
      Signature: -
      size_t H5Tget_size(hid_t type_id - ) -
      Purpose: -
      Returns the size of a datatype. -
      Description: -
      H5Tget_size returns the size of a datatype in bytes. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns the size of the datatype in bytes if successful; - otherwise 0. -
      - - -
      -
      -
      Name: H5Tset_size -
      Signature: -
herr_t H5Tset_size(hid_t type_id, - size_t size - ) -
      Purpose: -
      Sets the total size for an atomic datatype. -
      Description: -
      H5Tset_size sets the total size in bytes, - size, for an atomic datatype (this operation - is not permitted on compound datatypes). If the size is - decreased so that the significant bits of the datatype extend beyond - the edge of the new size, then the `offset' property is decreased - toward zero. If the `offset' becomes zero and the significant - bits of the datatype still hang over the edge of the new size, then - the number of significant bits is decreased. - Adjusting the size of an H5T_STRING automatically sets the precision - to 8*size. All datatypes have a positive size. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to change size. -
      size_t size -
      Size in bytes to modify datatype. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
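      A sketch of creating a fixed-length string datatype by copying the predefined H5T_C_S1 type and enlarging it; as noted above, adjusting the size of an H5T_STRING automatically sets the precision to 8*size.

          #include "hdf5.h"

          static hid_t make_string_type(size_t length)
          {
              hid_t str_type = H5Tcopy(H5T_C_S1);   /* 1-byte C string prototype */

              if (H5Tset_size(str_type, length) < 0) {   /* now 'length' bytes */
                  H5Tclose(str_type);
                  return -1;
              }
              return str_type;
          }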
      -
      -
      Name: H5Tget_order -
      Signature: -
      H5T_order_t H5Tget_order(hid_t type_id - ) -
      Purpose: -
      Returns the byte order of an atomic datatype. -
      Description: -
      H5Tget_order returns the byte order of an - atomic datatype. -

      - Possible return values are: -

        -
        H5T_ORDER_LE (0) -
        Little endian byte ordering (default). -
        H5T_ORDER_BE (1) -
        Big endian byte ordering. -
        H5T_ORDER_VAX (2) -
        VAX mixed byte ordering (not currently supported). -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns a byte order constant if successful; - otherwise H5T_ORDER_ERROR (-1). -
      - - -
      -
      -
      Name: H5Tset_order -
      Signature: -
herr_t H5Tset_order(hid_t type_id, - H5T_order_t order - ) -
      Purpose: -
      Sets the byte ordering of an atomic datatype. -
      Description: -
      H5Tset_order sets the byte ordering of an atomic datatype. - Byte orderings currently supported are: -
        -
        H5T_ORDER_LE (0) -
        Little-endian byte ordering (default). -
        H5T_ORDER_BE (1) -
        Big-endian byte ordering. -
        H5T_ORDER_VAX (2) -
        VAX mixed byte ordering (not currently supported). -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      H5T_order_t order -
      Byte ordering constant. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_precision -
      Signature: -
      size_t H5Tget_precision(hid_t type_id - ) -
      Purpose: -
      Returns the precision of an atomic datatype. -
      Description: -
H5Tget_precision returns the precision of an atomic datatype. The precision is the number of significant bits which, unless padding is present, is 8 times the value returned by H5Tget_size(). -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns the number of significant bits if successful; - otherwise 0. -
      - - -
      -
      -
      Name: H5Tset_precision -
      Signature: -
herr_t H5Tset_precision(hid_t type_id, - size_t precision - ) -
      Purpose: -
      Sets the precision of an atomic datatype. -
      Description: -
H5Tset_precision sets the precision of an atomic datatype. The precision is the number of significant bits which, unless padding is present, is 8 times the value returned by H5Tget_size(). -

If the precision is increased, then the offset is decreased and then the size is increased to ensure that significant bits do not "hang over" the edge of the datatype. -

      Changing the precision of an H5T_STRING automatically changes the - size as well. The precision must be a multiple of 8. -

      When decreasing the precision of a floating point type, set the - locations and sizes of the sign, mantissa, and exponent fields - first. -

      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      size_t precision -
      Number of bits of precision for datatype. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_offset -
      Signature: -
      size_t H5Tget_offset(hid_t type_id - ) -
      Purpose: -
      Retrieves the bit offset of the first significant bit. -
      Description: -
H5Tget_offset retrieves the bit offset of the first significant bit. The significant bits of an atomic datum can be offset from the beginning of the memory for that datum by an amount of padding. The `offset' property specifies the number of bits of padding that appear to the "right of" the value. That is, if we have a 32-bit datum with 16 bits of precision having the value 0x1122, then it will be laid out in memory as follows (from smaller byte addresses toward larger byte addresses): -
      -
      Byte Position | Big-Endian Offset=0 | Big-Endian Offset=16 | Little-Endian Offset=0 | Little-Endian Offset=16
      0:            | [ pad]              | [0x11]               | [0x22]                 | [ pad]
      1:            | [ pad]              | [0x22]               | [0x11]                 | [ pad]
      2:            | [0x11]              | [ pad]               | [ pad]                 | [0x22]
      3:            | [0x22]              | [ pad]               | [ pad]                 | [0x11]
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns a positive offset value if successful; - otherwise 0. -
      - - -
      -
      -
      Name: H5Tset_offset -
      Signature: -
      herr_t H5Tset_offset(hid_t type_id, - size_t offset - ) -
      Purpose: -
      Sets the bit offset of the first significant bit. -
      Description: -
H5Tset_offset sets the bit offset of the first significant bit. The significant bits of an atomic datum can be offset from the beginning of the memory for that datum by an amount of padding. The `offset' property specifies the number of bits of padding that appear to the "right of" the value. That is, if we have a 32-bit datum with 16 bits of precision having the value 0x1122, then it will be laid out in memory as follows (from smaller byte addresses toward larger byte addresses): -
      -
      Byte Position | Big-Endian Offset=0 | Big-Endian Offset=16 | Little-Endian Offset=0 | Little-Endian Offset=16
      0:            | [ pad]              | [0x11]               | [0x22]                 | [ pad]
      1:            | [ pad]              | [0x22]               | [0x11]                 | [ pad]
      2:            | [0x11]              | [ pad]               | [ pad]                 | [0x22]
      3:            | [0x22]              | [ pad]               | [ pad]                 | [0x11]
      -

      If the offset is incremented then the total size is -incremented also if necessary to prevent significant bits of -the value from hanging over the edge of the datatype. - -

      The offset of an H5T_STRING cannot be set to anything but -zero. -

      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      size_t offset -
      Offset of first significant bit. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_pad -
      Signature: -
      herr_t H5Tget_pad(hid_t type_id, - H5T_pad_t * lsb, - H5T_pad_t * msb - ) -
      Purpose: -
      Retrieves the padding type of the least and most-significant bit padding. -
      Description: -
      H5Tget_pad retrieves the padding type of the least and most-significant - bit padding. Valid types are: -
        -
        H5T_PAD_ZERO (0) -
        Set background to zeros. -
        H5T_PAD_ONE (1) -
        Set background to ones. -
        H5T_PAD_BACKGROUND (2) -
        Leave background alone. -
      -
      Parameters: -
      -
      hid_t type_id -
      IN: Identifier of datatype to query. -
      H5T_pad_t * lsb -
      OUT: Pointer to location to return least-significant - bit padding type. -
      H5T_pad_t * msb -
      OUT: Pointer to location to return most-significant - bit padding type. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tset_pad -
      Signature: -
      herr_t H5Tset_pad(hid_t type_id, - H5T_pad_t lsb, - H5T_pad_t msb - ) -
      Purpose: -
      Sets the least and most-significant bits padding types. -
      Description: -
      H5Tset_pad sets the least and most-significant bits padding types. -
        -
        H5T_PAD_ZERO (0) -
        Set background to zeros. -
        H5T_PAD_ONE (1) -
        Set background to ones. -
        H5T_PAD_BACKGROUND (2) -
        Leave background alone. -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      H5T_pad_t lsb -
      Padding type for least-significant bits. -
      H5T_pad_t msb -
      Padding type for most-significant bits. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
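      As an illustrative sketch, assuming dt is a previously opened atomic datatype identifier, both padding regions can be forced to zeros and then read back:

          H5T_pad_t lsb, msb;
          H5Tset_pad(dt, H5T_PAD_ZERO, H5T_PAD_ZERO);  /* zero-fill unused bits          */
          H5Tget_pad(dt, &lsb, &msb);                  /* both are now H5T_PAD_ZERO      */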
      - - -
      -
      -
      Name: H5Tget_sign -
      Signature: -
      H5T_sign_t H5Tget_sign(hid_t type_id - ) -
      Purpose: -
      Retrieves the sign type for an integer type. -
      Description: -
      H5Tget_sign retrieves the sign type for an integer type. - Valid types are: -
        -
        H5T_SGN_NONE (0) -
        Unsigned integer type. -
        H5T_SGN_2 (1) -
        Two's complement signed integer type. -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns a valid sign type if successful; - otherwise H5T_SGN_ERROR (-1). -
      - - -
      -
      -
      Name: H5Tset_sign -
      Signature: -
      herr_t H5Tset_sign(hid_t type_id, - H5T_sign_t sign - ) -
      Purpose: -
      Sets the sign property for an integer type. -
      Description: -
      H5Tset_sign sets the sign property for an integer type. Valid sign types are: -
        -
        H5T_SGN_NONE (0) -
        Unsigned integer type. -
        H5T_SGN_2 (1) -
        Two's complement signed integer type. -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      H5T_sign_t sign -
      Sign type. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
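      A minimal sketch, assuming H5Tcopy and the predefined type H5T_NATIVE_INT; it derives an unsigned integer type from a native signed one:

          hid_t ut = H5Tcopy(H5T_NATIVE_INT);   /* copy of the native signed int  */
          H5Tset_sign(ut, H5T_SGN_NONE);        /* make the copy unsigned         */
          if (H5Tget_sign(ut) == H5T_SGN_NONE) {
              /* ut now describes an unsigned integer type */
          }
          H5Tclose(ut);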
      - - -
      -
      -
      Name: H5Tget_fields -
      Signature: -
      herr_t H5Tget_fields(hid_t type_id, - size_t * epos, - size_t * esize, - size_t * mpos, - size_t * msize - ) -
      Purpose: -
      Retrieves floating point datatype bit field information. -
      Description: -
      H5Tget_fields retrieves information about the locations of the various - bit fields of a floating point datatype. The field positions are bit - positions in the significant region of the datatype. Bits are - numbered with the least significant bit number zero. - Any (or even all) of the arguments can be null pointers. -
      Parameters: -
      -
      hid_t type_id -
      IN: Identifier of datatype to query. -
      size_t * epos -
      OUT: Pointer to location to return exponent bit-position. -
      size_t * esize -
      OUT: Pointer to location to return size of exponent in bits. -
      size_t * mpos -
      OUT: Pointer to location to return mantissa bit-position. -
      size_t * msize -
      OUT: Pointer to location to return size of mantissa in bits. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
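      A sketch of a query on an existing floating-point type, assuming the predefined H5T_NATIVE_DOUBLE; any of the output arguments could instead be a null pointer:

          size_t epos, esize, mpos, msize;
          H5Tget_fields(H5T_NATIVE_DOUBLE, &epos, &esize, &mpos, &msize);
          /* epos/esize describe the exponent field, mpos/msize the mantissa */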
      - - -
      -
      -
      Name: H5Tset_fields -
      Signature: -
      herr_t H5Tset_fields(hid_t type_id, - size_t epos, - size_t esize, - size_t mpos, - size_t msize - ) -
      Purpose: -
      Sets locations and sizes of floating point bit fields. -
      Description: -
      H5Tset_fields sets the locations and sizes of the various floating - point bit fields. The field positions are bit positions in the - significant region of the datatype. Bits are numbered with the least - significant bit number zero. - -

      Fields are not allowed to extend beyond the number of bits of - precision, nor are they allowed to overlap with one another. -

      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      size_t epos -
      Exponent bit position. -
      size_t esize -
      Size of exponent in bits. -
      size_t mpos -
      Mantissa bit position. -
      size_t msize -
      Size of mantissa in bits. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
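      For example, an IEEE-style single-precision layout (an 8-bit exponent starting at bit 23 and a 23-bit mantissa starting at bit 0) could be described as follows. This is a sketch only, following the argument order shown in the signature above and assuming H5Tcopy, H5Tset_ebias (described below), and the predefined H5T_NATIVE_FLOAT; error checking is omitted.

          hid_t ft = H5Tcopy(H5T_NATIVE_FLOAT);
          H5Tset_fields(ft, 23, 8, 0, 23);   /* epos=23, esize=8, mpos=0, msize=23   */
          H5Tset_ebias(ft, 127);             /* IEEE single-precision exponent bias  */
          /* ... use ft ... */
          H5Tclose(ft);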
      - - -
      -
      -
      Name: H5Tget_ebias -
      Signature: -
      size_t H5Tget_ebias(hid_t type_id - ) -
      Purpose: -
      Retrieves the exponent bias of a floating-point type. -
      Description: -
      H5Tget_ebias retrieves the exponent bias of a floating-point type. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns the bias if successful; - otherwise 0. -
      - - -
      -
      -
      Name: H5Tset_ebias -
      Signature: -
      herr_t H5Tset_ebias(hid_t type_id, - size_t ebias - ) -
      Purpose: -
      Sets the exponent bias of a floating-point type. -
      Description: -
      H5Tset_ebias sets the exponent bias of a floating-point type. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      size_t ebias -
      Exponent bias value. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_norm -
      Signature: -
      H5T_norm_t H5Tget_norm(hid_t type_id - ) -
      Purpose: -
      Retrieves mantissa normalization of a floating-point datatype. -
      Description: -
      H5Tget_norm retrieves the mantissa normalization of - a floating-point datatype. Valid normalization types are: -
        -
        H5T_NORM_IMPLIED (0) -
        MSB of mantissa is not stored, always 1 -
        H5T_NORM_MSBSET (1) -
        MSB of mantissa is always 1 -
        H5T_NORM_NONE (2) -
        Mantissa is not normalized -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns a valid normalization type if successful; - otherwise H5T_NORM_ERROR (-1). -
      - - -
      -
      -
      Name: H5Tset_norm -
      Signature: -
      herr_t H5Tset_norm(hid_t type_id, - H5T_norm_t norm - ) -
      Purpose: -
      Sets the mantissa normalization of a floating-point datatype. -
      Description: -
      H5Tset_norm sets the mantissa normalization of - a floating-point datatype. Valid normalization types are: -
        -
        H5T_NORM_IMPLIED (0) -
        MSB of mantissa is not stored, always 1 -
        H5T_NORM_MSBSET (1) -
        MSB of mantissa is always 1 -
        H5T_NORM_NONE (2) -
        Mantissa is not normalized -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to set. -
      H5T_norm_t norm -
      Mantissa normalization type. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_inpad -
      Signature: -
      H5T_pad_t H5Tget_inpad(hid_t type_id - ) -
      Purpose: -
      Retrieves the internal padding type for unused bits in floating-point datatypes. -
      Description: -
      H5Tget_inpad retrieves the internal padding type for - unused bits in floating-point datatypes. - Valid padding types are: -
        -
        H5T_PAD_ZERO (0) -
        Set background to zeros. -
        H5T_PAD_ONE (1) -
        Set background to ones. -
        H5T_PAD_BACKGROUND (2) -
        Leave background alone. -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns a valid padding type if successful; - otherwise H5T_PAD_ERROR (-1). -
      - - -
      -
      -
      Name: H5Tset_inpad -
      Signature: -
      herr_t H5Tset_inpad(hid_t type_id, - H5T_pad_t inpad - ) -
      Purpose: -
      Fills unused internal floating point bits. -
      Description: -
      If any internal bits of a floating-point type are unused (that is, significant bits that are not part of the sign, exponent, or mantissa), H5Tset_inpad specifies that they be filled according to the padding type inpad. Valid padding types are: -
        -
        H5T_PAD_ZERO (0) -
        Set background to zeros. -
        H5T_PAD_ONE (1) -
        Set background to ones. -
        H5T_PAD_BACKGROUND (2) -
        Leave background alone. -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to modify. -
      H5T_pad_t inpad -
      Padding type. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_cset -
      Signature: -
      H5T_cset_t H5Tget_cset(hid_t type_id - ) -
      Purpose: -
      Retrieves the character set type of a string datatype. -
      Description: -
      H5Tget_cset retrieves the character set type - of a string datatype. Valid character set types are: -
        -
        H5T_CSET_ASCII (0) -
        Character set is US ASCII -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns a valid character set type if successful; - otherwise H5T_CSET_ERROR (-1). -
      - - -
      -
      -
      Name: H5Tset_cset -
      Signature: -
      herr_t H5Tset_cset(hid_t type_id, - H5T_cset_t cset - ) -
      Purpose: -
      Sets character set to be used. -
      Description: -
      H5Tset_cset sets the character set to be used. -

      - HDF5 is able to distinguish between character sets of different - nationalities and to convert between them to the extent possible. - Valid character set types are: -

        -
        H5T_CSET_ASCII (0) -
        Character set is US ASCII. -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to modify. -
      H5T_cset_t cset -
      Character set type. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_strpad -
      Signature: -
      H5T_str_t H5Tget_strpad(hid_t type_id - ) -
      Purpose: -
      Retrieves the string padding method for a string datatype. -
      Description: -
      H5Tget_strpad retrieves the string padding method - for a string datatype. Valid string padding types are: -
        -
        H5T_STR_NULL (0) -
        Pad with zeros (as C does) -
        H5T_STR_SPACE (1) -
        Pad with spaces (as FORTRAN does) -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns a valid string padding type if successful; - otherwise H5T_STR_ERROR (-1). -
      - - -
      -
      -
      Name: H5Tset_strpad -
      Signature: -
      herr_t H5Tset_strpad(hid_t type_id, - H5T_str_t strpad - ) -
      Purpose: -
      Defines the storage mechanism for character strings. -
      Description: -
      The method used to store character strings differs with the - programming language: C usually null terminates strings while - Fortran left-justifies and space-pads strings. - H5Tset_strpad defines the storage mechanism for the string. - Valid string padding values are: -
        -
        H5T_STR_NULL (0) -
        Pad with zeros (as C does) -
        H5T_STR_SPACE (1) -
        Pad with spaces (as FORTRAN does) -
      -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to modify. -
      H5T_str_t strpad -
      String padding type. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
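      Putting the string properties together, a 16-character, space-padded (Fortran-style) ASCII string type might be built as follows. This is a sketch, assuming H5Tcopy, H5Tset_size, and the predefined C string base type H5T_C_S1.

          hid_t st = H5Tcopy(H5T_C_S1);       /* one-byte C string base type     */
          H5Tset_size(st, 16);                /* 16 characters of storage        */
          H5Tset_cset(st, H5T_CSET_ASCII);    /* US ASCII character set          */
          H5Tset_strpad(st, H5T_STR_SPACE);   /* space padded, as FORTRAN does   */
          /* ... use st ... */
          H5Tclose(st);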
      - - -
      -
      -
      Name: H5Tget_nmembers -
      Signature: -
      intn H5Tget_nmembers(hid_t type_id - ) -
      Purpose: -
      Retrieves the number of fields in a compound datatype. -
      Description: -
      H5Tget_nmembers retrieves the number of fields a compound datatype has. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      -
      Returns: -
      Returns number of members datatype has if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_member_name -
      Signature: -
      char * H5Tget_member_name(hid_t type_id, - int field_idx - ) -
      Purpose: -
      Retrieves the name of a field of a compound datatype. -
      Description: -
      H5Tget_member_name retrieves the name of a field - of a compound datatype. Fields are stored in no particular - order, with indexes 0 through N-1, where N is the value returned - by H5Tget_nmembers(). The name of the field is - allocated with malloc() and the caller is responsible - for freeing the memory used by the name. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      int field_idx -
      Field index (0-based) of the field name to retrieve. -
      -
      Returns: -
      Returns a valid pointer if successful; - otherwise NULL. -
      - - -
      -
      -
      Name: H5Tget_member_dims -
      Signature: -
      int H5Tget_member_dims(hid_t type_id, - int field_idx, - size_t *dims, - int *perm - ) -
      Purpose: -
      Returns the dimensionality of the field. -
      Description: -
      H5Tget_member_dims returns the dimensionality of the field. The dimensions and permutation vector are returned through the arguments dims and perm, both arrays of at least four elements. Either (or even both) may be null pointers. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      int field_idx -
      Field index (0-based) of the field dims - to retrieve. -
      size_t * dims -
      Pointer to buffer to store the dimensions of the field. -
      int * perm -
      Pointer to buffer to store the permutation vector of - the field. -
      -
      Returns: -
      Returns the number of dimensions, a number from 0 to 4, - if successful. - Otherwise returns FAIL (-1). -
      - - -
      -
      -
      Name: H5Tget_member_type -
      Signature: -
      hid_t H5Tget_member_type(hid_t type_id, - int field_idx - ) -
      Purpose: -
      Returns the datatype of the specified member. -
      Description: -
      H5Tget_member_type returns the datatype of the specified member. The caller - should invoke H5Tclose() to release resources associated with the type. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to query. -
      int field_idx -
      Field index (0-based) of the field type to retrieve. -
      -
      Returns: -
      Returns the identifier of a copy of the datatype of the field - if successful; - otherwise FAIL (-1). -
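      The three calls above are typically used together to walk the members of a compound datatype. A minimal sketch, assuming ctype is an open compound datatype identifier:

          #include <stdio.h>
          #include <stdlib.h>

          int    i, n;
          char  *mname;
          hid_t  mtype;

          n = H5Tget_nmembers(ctype);
          for (i = 0; i < n; i++) {
              mname = H5Tget_member_name(ctype, i);
              mtype = H5Tget_member_type(ctype, i);
              printf("member %d: %s\n", i, mname);
              free(mname);       /* the name was allocated with malloc()      */
              H5Tclose(mtype);   /* release the copy of the member's datatype */
          }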
      - - -
      -
      -
      Name: H5Tinsert -
      Signature: -
      herr_t H5Tinsert(hid_t type_id, - const char * name, - off_t offset, - hid_t field_id - ) -
      Purpose: -
      Adds a new member to a compound datatype. -
      Description: -
      H5Tinsert adds another member to the compound datatype - type_id. The new member has a name which - must be unique within the compound datatype. - The offset argument defines the start of the member - in an instance of the compound datatype, and field_id - is the datatype identifier of the new member. -

      - Note: All members of a compound datatype must be atomic; a - compound datatype cannot have a member which is a compound - datatype. -

      Parameters: -
      -
      hid_t type_id -
      Identifier of compound datatype to modify. -
      const char * name -
      Name of the field to insert. -
      off_t offset -
      Offset in memory structure of the field to insert. -
      hid_t field_id -
      Datatype identifier of the field to insert. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
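      For example, a compound datatype mirroring a C struct can be assembled member by member. A sketch, assuming H5Tcreate(H5T_COMPOUND, ...) as documented elsewhere in this manual, the standard offsetof macro, and the predefined native types; the struct and member names are illustrative only.

          #include <stddef.h>            /* offsetof */

          typedef struct {
              int    serial;
              double temperature;
          } record_t;                     /* illustrative struct */

          hid_t ct = H5Tcreate(H5T_COMPOUND, sizeof(record_t));
          H5Tinsert(ct, "serial",      offsetof(record_t, serial),      H5T_NATIVE_INT);
          H5Tinsert(ct, "temperature", offsetof(record_t, temperature), H5T_NATIVE_DOUBLE);
          /* ... use ct when creating a dataset ... */
          H5Tclose(ct);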
      - - -
      -
      -
      Name: H5Tpack -
      Signature: -
      herr_t H5Tpack(hid_t type_id - ) -
      Purpose: -
      Recursively removes padding from within a compound datatype. -
      Description: -
      H5Tpack recursively removes padding from within a compound - datatype to make it more efficient (space-wise) to store that data. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to modify. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
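      A sketch of typical use, assuming ct is a compound datatype whose layout was copied from a (possibly padded) C struct and which is about to be used as the on-disk datatype:

          hid_t disk_ct = H5Tcopy(ct);   /* keep the padded memory layout in ct     */
          H5Tpack(disk_ct);              /* remove alignment padding for storage    */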
      - - -
      -
      -
      Name: H5Tregister_hard -
      Signature: -
      herr_t H5Tregister_hard(const char - * name, hid_t src_id, - hid_t dst_id, - H5T_conv_t func - ) -
      Purpose: -
      Registers a hard conversion function. -
      Description: -
      H5Tregister_hard registers a hard conversion function for a datatype - conversion path. The path is specified by the source and destination - datatypes src_id and dst_id. A conversion - path can only have one hard function, so func replaces any - previous hard function. -

      - If func is the null pointer then any hard function - registered for this path is removed from this path. The soft functions - are then used when determining which conversion function is appropriate - for this path. The name argument is used only - for debugging and should be a short identifier for the function. -

      - The type of the conversion function pointer is declared as: -
      - typedef herr_t (*H5T_conv_t) (hid_t src_id, - hid_t dst_id, - H5T_cdata_t *cdata, - size_t nelmts, - void *buf, - void *bkg); -

      Parameters: -
      -
      const char * name -
      Name displayed in diagnostic output. -
      hid_t src_id -
      Identifier of source datatype. -
      hid_t dst_id -
      Identifier of destination datatype. -
      H5T_conv_t func -
      Function to convert between source and destination datatypes. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
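      A skeleton of a hard conversion function and its registration, following the H5T_conv_t typedef shown above; the function name and the conversion path are illustrative only, and the body is application specific.

          static herr_t
          int_to_short(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
                       size_t nelmts, void *buf, void *bkg)
          {
              /* inspect cdata and convert nelmts values in place in buf */
              return 0;                  /* SUCCEED */
          }

          /* ... later, e.g. during application start-up ... */
          H5Tregister_hard("int2short", H5T_NATIVE_INT, H5T_NATIVE_SHORT,
                           int_to_short);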
      - - -
      -
      -
      Name: H5Tregister_soft -
      Signature: -
      herr_t H5Tregister_soft(const char - * name, H5T_class_t src_cls, - H5T_class_t dst_cls, - H5T_conv_t func - ) -
      Purpose: -
      Registers a soft conversion function. -
      Description: -
      H5Tregister_soft registers a soft conversion function by adding it to the - end of the master soft list and replacing the soft function in all - applicable existing conversion paths. The name - is used only for debugging and should be a short identifier - for the function. -

      - The type of the conversion function pointer is declared as: -
      - typedef herr_t (*H5T_conv_t) (hid_t src_id, - hid_t dst_id, - H5T_cdata_t *cdata, - size_t nelmts, - void *buf, - void *bkg); -

      Parameters: -
      -
      const char * name -
      Name displayed in diagnostic output. -
      H5T_class_t src_cls -
      Identifier of source datatype class. -
      H5T_class_t dst_cls -
      Identifier of destination datatype class. -
      H5T_conv_t func -
      Function to convert between source and destination datatypes. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tunregister -
      Signature: -
      herr_t H5Tunregister(H5T_conv_t func - ) -
      Purpose: -
      Removes a conversion function from all conversion paths. -
      Description: -
      H5Tunregister removes a conversion function from all conversion paths. -

      - The type of the conversion function pointer is declared as: -
      - typedef herr_t (*H5T_conv_t) (hid_t src_id, - hid_t dst_id, - H5T_cdata_t *cdata, - size_t nelmts, - void *buf, - void *bkg); -

      Parameters: -
      -
      H5T_conv_t func -
      Function to remove from conversion paths. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -
      Name: H5Tclose -
      Signature: -
      herr_t H5Tclose(hid_t type_id - ) -
      Purpose: -
      Releases a datatype. -
      Description: -
      H5Tclose releases a datatype. Further access - through the datatype identifier is illegal. Failure to release - a datatype with this call will result in resource leaks. -
      Parameters: -
      -
      hid_t type_id -
      Identifier of datatype to release. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
      - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 2 September 1998 - - - diff --git a/doc/src/RM_H5Z.html b/doc/src/RM_H5Z.html deleted file mode 100644 index 5cdc5c8..0000000 --- a/doc/src/RM_H5Z.html +++ /dev/null @@ -1,129 +0,0 @@ - - -HDF5/H5Z Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      H5Z: Compression Interface

      -
      - -

      Compression API Functions

      This function enables the user to configure a new compression method for the local environment. -
      - -       -
        -   -
      -
      - -

      -HDF5 supports compression of raw data by compression methods -built into the library or defined by an application. -A compression method is associated with a dataset when the dataset is -created and is applied independently to each storage chunk of the dataset. -The dataset must use the H5D_CHUNKED storage layout. -

      -The HDF5 library does not support compression for contiguous datasets -because of the difficulty of implementing random access for partial I/O. -Compact dataset compression is not supported because it would not produce -significant results. -

      -See Compression in the -HDF5 User's Guide for further information. - -


      -
      -
      Name: H5Zregister -
      Signature: -
      herr_t H5Zregister(H5Z_method_t method, - const char *name, - H5Z_func_t cfunc, - H5Z_func_t ufunc - ) -
      Purpose: -
      Registers new compression and uncompression functions for a - method specified by a method number. -
      Description: -
      H5Zregister registers new compression and uncompression - functions for a method specified by a method number, method. - name is used for debugging and may be the null pointer. - Either or both of cfunc (the compression function) and - ufunc (the uncompression method) may be null pointers. -

      - The statistics associated with a method number are not reset - by this function; they accumulate over the life of the library. -

      Parameters: -
      -
      H5Z_method_t method -
      Number specifying compression method. -
      const char *name -
      Name associated with the method number. -
      H5Z_func_t cfunc -
      Compression method. -
      H5Z_func_t ufunc -
      Uncompression method. -
      -
      Returns: -
      Returns SUCCEED (0) if successful; - otherwise FAIL (-1). -
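      An illustrative registration call, where my_compress and my_uncompress are assumed to be application-defined functions with the H5Z_func_t prototype and 42 is an arbitrary user-chosen method number; either callback could instead be passed as a null pointer.

          /* register both callbacks for the hypothetical method number 42 */
          H5Zregister((H5Z_method_t)42, "example-method", my_compress, my_uncompress);

          /* or register only an uncompression function, e.g. to read existing data */
          H5Zregister((H5Z_method_t)42, "example-method", NULL, my_uncompress);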
      - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 14 July 1998 - - - diff --git a/doc/src/Tools.html b/doc/src/Tools.html deleted file mode 100644 index f5c1578..0000000 --- a/doc/src/Tools.html +++ /dev/null @@ -1,265 +0,0 @@ - - -HDF5/Tools Draft API Specification - - - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   - -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -

      HDF5 Tools

      -
      - -

      HDF5 Tool Interfaces

      -

      -These tools enable the user to examine HDF5 files interactively. - - - - -
      -
        -
      • h5dump -- A tool for displaying - HDF5 file contents -
      • h5ls -- A tool for listing specified - features of HDF5 file contents -
      • h5repart -- A tool for repartitioning - a file, creating a family of files -
      -
      - -

      - - -


      -
      -
      Tool Name: h5dump -
      Syntax: -
      h5dump - [-h] - [-bb] - [-header] - [-a names] - [-d names] - [-g names] - [-l names] - file -
      Purpose: -
      Displays HDF5 file contents. -
      Description: -
      h5dump enables the user to interactively examine - the contents of an HDF5 file and dump those contents, - in human readable form, to an ASCII file or to other tools. -

      - h5dump displays HDF5 file content on - standard output. It may display the content of the - whole HDF5 file or selected objects, which can be groups, - datasets, links, or attributes. -

      - The -header option displays object - header information only and must appear before the - -a, -d, -g, or - -l options. -

      - Native datatypes created on one machine are displayed with native names when h5dump runs on the same machine type. When h5dump runs on a different machine type, it displays the native datatypes with standard type names. This will be changed in the next release to always display standard type names. -

      - The h5dump output is described in detail in - DDL, the Data Description - Language document. -

      Options and Parameters: -
      -
      -h -
      Print information on this command. -
      -bb -
      Displays the content of boot block. The default is - not to display. -
      -header -
      Displays header information only; no data is displayed. -
      -a names -
      Displays the specified attribute(s). -
      -d names -
      Displays the specified dataset(s). -
      -g names -
      Displays all the objects within the specified group(s). -
      -l names -
      Displays the specified link value(s). -
      file -
      The file to be examined. -
      -
      Current Status: -
      The current version of h5dump can display the - following types of information: -
        -
      • Group name -
      • Attribute name, data type, data space, and data -
      • Dataset name, data type, data space, and data -
      • Soft link name, link value -
      - -
      Limitations in the current implementation include the following: -
        -
      • Only one file is displayed at a time (file families are - not supported). -
      • The whole file content is displayed (none of the above options are supported). -
      • Compound data types are not yet supported. -
      • Complex data spaces are not yet supported. -
      -
      See Also: -
      HDF5 Data Description Language syntax - (DDL) -
      - - -
      -
      -
      Tool Name: h5ls -
      Syntax: -
      h5ls - [options] - file - [objects...] -
      Purpose: -
      Prints information about a file or dataset. -
      Description: -
      h5ls prints selected information about file objects - in the specified format. -
      Options and Parameters: -
      -
      -h   or   -?   or   --help -
      Print a usage message and exit. -
      -d   or   --dump -
      Print the values of datasets. -
      -wN   or   --width=N -
      Set the number of columns of output. -
      -v   or   --verbose -
      Generate more verbose output. -
      -V   or   --version -
      Print version number and exit. -
      file -
      The file name may include a printf(3C) integer format such as %05d to open a file family. -
      objects -
      The names of zero or more objects about which information - should be displayed. If a group is mentioned then - information about each of its members is displayed. - If no object names are specified then information about - all of the objects in the root group is displayed. -
      - -
      - - -
      -
      -
      Tool Name: h5repart -
      Syntax: -
      h5repart - [-v] - [-V] - [-[b|m]N[g|m|k]] - source_file - dest_file -
      Purpose: -
      Repartitions a file or family of files. -
      Description: -
      h5repart splits a single file into a family of - files, joins a family of files into a single file, or copies - one family of files to another while changing the size of the - family members. h5repart can also be used to - copy a single file to a single file with holes. -

      - Sizes associated with the -b and -m - options may be suffixed with g for gigabytes, - m for megabytes, or k for kilobytes. -

      - File family names include an integer printf - format such as %d. - -

      Options and Parameters: -
      -
      -v -
      Produce verbose output. -
      -V -
      Print a version number and exit. -
      -bN -
      The I/O block size, defaults to 1kB -
      -mN -
      The destination member size; defaults to 1GB -
      source_file -
      The name of the source file -
      dest_file -
      The name of the destination files -
      - -
      - - -
      -
      -HDF5 Reference Manual  -H5   -H5A   -H5D   -H5E   -H5F   -H5G   -H5P   - -H5S   -H5T   -H5Z   -Tools   - -
      -
      - -
      -HDF Help Desk - -
      -Last modified: 9 September 1998 - - - diff --git a/doc/tgif/APIGrammar.obj b/doc/tgif/APIGrammar.obj deleted file mode 100644 index bddc181..0000000 --- a/doc/tgif/APIGrammar.obj +++ /dev/null @@ -1,216 +0,0 @@ -%TGIF 3.0-p9 -state(0,33,100.000,0,0,0,16,1,9,1,1,0,2,0,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -group([ -polygon('black',13,[ - 48,48,48,80,48,96,64,96,176,96,192,96,192,80,192,48, - 192,32,176,32,64,32,48,32,48,48],0,1,1,0,0,0,0,0,0,0,'1', - "2490",[ -]), -box('black',52,36,188,92,0,1,0,1,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',120,35,'Courier',0,17,1,1,0,1,160,16,2,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "API Flowchart", 1, 0, 0, -text('black',120,56,'Courier',0,17,1,1,0,1,130,16,3,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "API Flowchart"])) -]) -], -4,0,0,[ -]). -group([ -polygon('black',13,[ - 176,432,176,464,176,480,190,480,290,480,304,480,304,464,304,432, - 304,416,290,416,190,416,176,416,176,432],0,1,1,0,25,0,0,0,0,0,'1', - "2490",[ -]), -box('black',180,420,300,476,0,1,0,26,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',240,419,'Courier',0,17,1,1,0,1,160,16,27,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Manipulate", 1, 0, 0, -text('black',240,432,'Courier',0,17,2,1,0,1,100,32,28,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Manipulate", - "File"])) -]) -], -24,0,0,[ -]). -group([ -polygon('black',13,[ - 96,316,96,340,96,352,111,352,209,352,224,352,224,340,224,316, - 224,304,209,304,111,304,96,304,96,316],0,1,1,0,35,0,0,0,0,0,'1', - "2490",[ -]), -box('black',98,307,222,349,0,1,0,36,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',160,306,'Courier',0,17,1,1,0,1,160,16,37,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "H5Fopen", 1, 0, 0, -text('black',160,320,'Courier',0,17,1,1,0,1,70,16,38,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Fopen"])) -]) -], -34,0,0,[ -]). -group([ -polygon('black',13,[ - 96,188,96,212,96,224,111,224,209,224,224,224,224,212,224,188, - 224,176,209,176,111,176,96,176,96,188],0,1,1,0,40,0,0,0,0,0,'1', - "2490",[ -]), -box('black',101,179,221,221,0,1,0,41,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',160,178,'Courier',0,17,1,1,0,1,160,16,42,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "H5dont_atexit", 1, 0, 0, -text('black',161,192,'Courier',0,17,1,1,0,1,130,16,43,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5dont_atexit"])) -]) -], -39,0,0,[ -]). -group([ -polygon('black',13,[ - 256,316,256,340,256,352,270,352,370,352,384,352,384,340,384,316, - 384,304,370,304,270,304,256,304,256,316],0,1,1,0,45,0,0,0,0,0,'1', - "2490",[ -]), -box('black',259,307,381,349,0,1,0,46,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',320,306,'Courier',0,17,1,1,0,1,160,16,47,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "H5Fcreate", 1, 0, 0, -text('black',320,320,'Courier',0,17,1,1,0,1,90,16,48,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Fcreate"])) -]) -], -44,0,0,[ -]). 
-group([ -polygon('black',13,[ - 864,288,864,320,864,336,878,336,978,336,992,336,992,320,992,288, - 992,272,978,272,878,272,864,272,864,288],0,1,1,0,50,0,0,0,0,0,'1', - "2490",[ -]), -box('black',868,276,988,332,0,1,0,51,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',928,275,'Courier',0,17,1,1,0,1,160,16,52,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "File", 1, 0, 0, -text('black',928,280,'Courier',0,17,3,1,0,1,90,48,53,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "File", - "Template", - "Functions"])) -]) -], -49,0,0,[ -]). -poly('black',2,[ - 240,128,240,272],0,1,1,131,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -arc('black',0,1,1,0,226,277,242,293,242,277,226,293,0,32,32,5760,5760,132,0,0,8,3,0,1,0,'1','8','3',[ - 242,293,226,277,242,293,0,-1000,1000,0,13,-22],[ -]). -arc('black',0,1,1,0,194,277,210,293,210,277,194,293,0,32,32,5760,5760,140,0,0,8,3,0,1,0,'1','8','3',[ - 210,293,194,277,210,293,0,-1000,-1000,0,13,-22],[ -]). -poly('black',2,[ - 256,288,288,288],0,1,1,143,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 192,288,224,288],0,1,1,145,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -arc('black',0,1,1,0,258,277,274,293,274,277,258,293,0,32,32,5760,5760,146,0,2,8,3,0,1,0,'1','8','3',[ - 274,293,258,277,274,293,0,1000,-1000,0,13,10],[ -]). -arc('black',0,1,1,0,130,277,146,293,146,277,130,293,0,32,32,5760,5760,149,0,2,8,3,0,1,0,'1','8','3',[ - 146,293,130,277,146,293,0,1000,1000,0,45,10],[ -]). -arc('black',0,1,1,0,194,165,210,181,210,165,194,181,0,32,32,5760,5760,151,0,0,8,3,0,1,0,'1','8','3',[ - 210,181,194,165,210,181,0,-1000,-1000,0,13,-22],[ -]). -arc('black',0,1,1,0,194,213,210,229,210,213,194,229,0,32,32,5760,5760,157,0,0,8,3,0,1,0,'1','8','3',[ - 210,229,194,213,210,229,0,1000,-1000,0,13,10],[ -]). -group([ -arc('black',0,1,1,0,226,325,242,341,242,325,226,341,0,32,32,5760,5760,161,0,0,8,3,0,1,0,'1','8','3',[ - 242,341,226,325,242,341,0,1000,1000,0,13,42],[ -]), -arc('black',0,1,1,0,194,325,210,341,210,325,194,341,0,32,32,5760,5760,162,0,0,8,3,0,1,0,'1','8','3',[ - 210,341,194,325,210,341,0,1000,-1000,0,13,42],[ -]), -poly('black',2,[ - 256,366,288,366],0,1,1,163,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]), -poly('black',2,[ - 192,366,224,366],0,1,1,164,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]), -arc('black',0,1,1,0,258,325,274,341,274,325,258,341,0,32,32,5760,5760,165,0,0,8,3,0,1,0,'1','8','3',[ - 274,341,258,325,274,341,0,-1000,-1000,0,13,10],[ -]), -arc('black',0,1,1,0,130,325,146,341,146,325,130,341,0,32,32,5760,5760,166,0,0,8,3,0,1,0,'1','8','3',[ - 146,341,130,325,146,341,0,-1000,1000,0,45,10],[ -]) -], -175,0,0,[ -]). -poly('black',2,[ - 240,416,240,384],2,1,1,188,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 240,512,240,480],2,1,1,192,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 176,524,176,548,176,560,191,560,289,560,304,560,304,548,304,524, - 304,512,289,512,191,512,176,512,176,524],0,1,1,0,194,0,0,0,0,0,'1', - "2490",[ -]), -box('black',178,515,302,557,0,1,0,195,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',240,514,'Courier',0,17,1,1,0,1,160,16,196,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "H5Fclose", 1, 0, 0, -text('black',240,528,'Courier',0,17,1,1,0,1,80,16,197,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5Fclose"])) -]) -], -193,0,0,[ -]). -rcbox('black',848,192,1008,352,0,1,1,0,16,761,0,0,0,0,'1',[ -]). 
-group([ -polygon('black',13,[ - 864,220,864,244,864,256,879,256,977,256,992,256,992,244,992,220, - 992,208,977,208,879,208,864,208,864,220],0,1,1,0,763,0,0,0,0,0,'1', - "2490",[ -]), -box('black',869,211,989,253,0,1,0,764,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',928,210,'Courier',0,17,1,1,0,1,160,16,765,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "H5 Utility", 1, 0, 0, -text('black',929,216,'Courier',0,17,2,1,0,1,100,32,766,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "H5 Utility", - "Functions"])) -]) -], -762,0,0,[ -]). -text('black',864,128,'Courier',0,17,3,0,0,1,150,48,794,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "At Any Time,", - "These Functions", - "May Be Called"]). diff --git a/doc/tgif/FileGrammar.obj b/doc/tgif/FileGrammar.obj deleted file mode 100644 index d416523..0000000 --- a/doc/tgif/FileGrammar.obj +++ /dev/null @@ -1,552 +0,0 @@ -%TGIF 3.0-p9 -state(1,33,100.000,0,0,0,16,1,9,1,1,0,2,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -group([ -polygon('black',13,[ - 48,96,48,112,48,128,64,128,128,128,144,128,144,112,144,96, - 144,80,128,80,64,80,48,80,48,96],0,1,1,0,22,0,0,0,0,0,'1', - "2490",[ -]), -box('black',52,84,140,124,0,1,0,23,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',96,83,'Courier',0,17,1,1,0,1,160,16,24,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "HDF5 File", 1, 0, 0, -text('black',96,96,'Courier',0,17,1,1,0,1,90,16,25,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "HDF5 File"])) -]) -], -26,0,0,[ -]). -group([ -polygon('black',13,[ - 160,287,160,321,160,352,179,352,253,352,272,352,272,321,272,287, - 272,256,253,256,179,256,160,256,160,287],0,1,1,0,35,0,0,0,0,0,'1', - "2490",[ -]), -box('black',165,263,267,345,0,1,0,36,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',216,262,'Courier',0,17,1,1,0,1,160,16,37,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Boot Block", 1, 0, 0, -text('black',216,272,'Courier',0,17,4,1,0,1,110,64,38,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Boot Block", - "&", - "File Infra-", - "structure"])) -]) -], -34,0,0,[ -]). -group([ -polygon('black',13,[ - 400,240,400,256,400,272,416,272,480,272,496,272,496,256,496,240, - 496,224,480,224,416,224,400,224,400,240],0,1,1,0,55,0,0,0,0,0,'1', - "2490",[ -]), -box('black',404,228,492,268,0,1,0,56,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',448,227,'Courier',0,17,1,1,0,1,160,16,57,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Group", 1, 0, 0, -text('black',448,240,'Courier',0,17,1,1,0,1,50,16,58,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group"])) -]) -], -54,0,0,[ -]). -group([ -polygon('black',13,[ - 400,352,400,368,400,384,416,384,480,384,496,384,496,368,496,352, - 496,336,480,336,416,336,400,336,400,352],0,1,1,0,79,0,0,0,0,0,'1', - "2490",[ -]), -box('black',404,340,492,380,0,1,0,80,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',448,339,'Courier',0,17,1,1,0,1,160,16,81,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Dataset", 1, 0, 0, -text('black',448,352,'Courier',0,17,1,1,0,1,70,16,82,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset"])) -]) -], -78,0,0,[ -]). -rcbox('black',128,192,576,400,0,1,1,0,16,90,0,0,0,0,'1',[ -]). -poly('black',2,[ - 272,304,336,304],0,1,1,95,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). 
-poly('black',2,[ - 144,80,560,192],1,1,1,110,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 48,128,128,384],1,1,1,111,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 272,464,272,480,272,496,294,496,378,496,400,496,400,480,400,464, - 400,448,378,448,294,448,272,448,272,464],0,1,1,0,130,0,0,0,0,0,'1', - "2490",[ -]), -box('black',278,452,394,492,0,1,0,131,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',336,451,'Courier',0,17,1,1,0,1,160,16,132,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Boot Block", 1, 0, 0, -text('black',336,464,'Courier',0,17,1,1,0,1,100,16,133,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Boot Block"])) -]) -], -129,0,0,[ -]). -group([ -polygon('black',13,[ - 272,512,272,528,272,544,294,544,378,544,400,544,400,528,400,512, - 400,496,378,496,294,496,272,496,272,512],0,1,1,0,149,0,0,0,0,0,'1', - "2490",[ -]), -box('black',278,500,394,540,0,1,0,150,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',336,499,'Courier',0,17,1,1,0,1,160,16,151,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Free-Space", 1, 0, 0, -text('black',336,504,'Courier',0,17,2,1,0,1,100,32,152,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Free-Space", - "List"])) -]) -], -148,0,0,[ -]). -group([ -polygon('black',13,[ - 272,560,272,576,272,592,294,592,378,592,400,592,400,576,400,560, - 400,544,378,544,294,544,272,544,272,560],0,1,1,0,161,0,0,0,0,0,'1', - "2490",[ -]), -box('black',278,548,394,588,0,1,0,162,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',336,547,'Courier',0,17,1,1,0,1,160,16,163,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Small-Object", 1, 0, 0, -text('black',336,552,'Courier',0,17,2,1,0,1,120,32,164,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Small-Object", - "Heap"])) -]) -], -160,0,0,[ -]). -rcbox('black',256,432,416,608,0,1,1,0,16,187,0,0,0,0,'1',[ -]). -poly('black',2,[ - 160,336,256,592],1,1,1,189,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 272,272,400,432],1,1,1,190,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 788,352,788,368,788,384,808,384,888,384,908,384,908,368,908,352, - 908,336,888,336,808,336,788,336,788,352],0,1,1,0,197,0,0,0,0,0,'1', - "2490",[ -]), -box('black',793,340,903,380,0,1,0,198,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',848,339,'Courier',0,17,1,1,0,1,160,16,199,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Symbol-Table", 1, 0, 0, -text('black',848,344,'Courier',0,17,2,1,0,1,120,32,200,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Symbol-Table", - "Header"])) -]) -], -196,0,0,[ -]). -group([ -polygon('black',13,[ - 788,400,788,416,788,432,808,432,888,432,908,432,908,416,908,400, - 908,384,888,384,808,384,788,384,788,400],0,1,1,0,209,0,0,0,0,0,'1', - "2490",[ -]), -box('black',793,388,903,428,0,1,0,210,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',848,387,'Courier',0,17,1,1,0,1,160,16,211,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Symbol", 1, 0, 0, -text('black',848,392,'Courier',0,17,2,1,0,1,60,32,212,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Symbol", - "List"])) -]) -], -208,0,0,[ -]). -poly('black',2,[ - 496,352,560,432],1,1,1,213,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',768,320,928,448,0,1,1,0,16,216,0,0,0,0,'1',[ -]). 
-poly('black',2,[ - 400,368,448,544],1,1,1,219,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 1024,513,1024,528,1024,544,1040,544,1104,544,1120,544,1120,528,1120,513, - 1120,496,1104,496,1040,496,1024,496,1024,513],0,1,1,0,228,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1028,501,1116,540,0,1,0,229,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1072,499,'Courier',0,17,1,1,0,1,160,16,230,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Group", 1, 0, 0, -text('black',1072,512,'Courier',0,17,1,1,0,1,50,16,231,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group"])) -]) -], -227,0,0,[ -]). -group([ -polygon('black',13,[ - 1024,592,1024,608,1024,624,1040,624,1104,624,1120,624,1120,608,1120,592, - 1120,576,1104,576,1040,576,1024,576,1024,592],0,1,1,0,238,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1028,580,1116,620,0,1,0,239,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1072,579,'Courier',0,17,1,1,0,1,160,16,240,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Dataset", 1, 0, 0, -text('black',1072,592,'Courier',0,17,1,1,0,1,70,16,241,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset"])) -]) -], -237,0,0,[ -]). -poly('black',2,[ - 944,560,1200,560],1,1,1,249,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -arc('black',0,1,1,0,1120,480,1152,520,1152,560,1152,480,0,64,80,-5760,11520,294,0,0,8,3,0,0,0,'1','8','3',[ -]). -poly('black',2,[ - 1152,480,992,480],0,1,1,295,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -arc('black',0,1,1,0,960,480,992,520,992,480,992,560,0,64,80,5760,11520,296,0,1,8,3,0,0,0,'1','8','3',[ -]). -rcbox('black',928,464,1216,640,0,1,1,0,16,301,0,0,0,0,'1',[ -]). -poly('black',2,[ - 416,224,624,192],1,1,1,303,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 416,272,624,320],1,1,1,305,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 624,224,624,240,624,256,640,256,704,256,720,256,720,240,720,224, - 720,208,704,208,640,208,624,208,624,224],0,1,1,0,351,0,0,0,0,0,'1', - "2490",[ -]), -box('black',628,212,716,252,0,1,0,352,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,211,'Courier',0,17,1,1,0,1,160,16,353,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Group", 1, 0, 0, -text('black',672,216,'Courier',0,17,2,1,0,1,60,32,354,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group", - "Header"])) -]) -], -350,0,0,[ -]). -group([ -polygon('black',13,[ - 624,272,624,288,624,304,640,304,704,304,720,304,720,288,720,272, - 720,256,704,256,640,256,624,256,624,272],0,1,1,0,363,0,0,0,0,0,'1', - "2490",[ -]), -box('black',628,260,716,300,0,1,0,364,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,259,'Courier',0,17,1,1,0,1,160,16,365,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Symbol", 1, 0, 0, -text('black',672,264,'Courier',0,17,2,1,0,1,60,32,366,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Symbol", - "Table"])) -]) -], -362,0,0,[ -]). -rcbox('black',608,192,736,320,0,1,1,0,16,389,0,0,0,0,'1',[ -]). -poly('black',2,[ - 704,256,912,320],1,1,1,396,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 640,304,768,432],1,1,1,400,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). 
-group([ -polygon('black',13,[ - 624,544,624,560,624,576,640,576,704,576,720,576,720,560,720,544, - 720,528,704,528,640,528,624,528,624,544],0,1,1,0,403,0,0,0,0,0,'1', - "2490",[ -]), -box('black',628,532,716,572,0,1,0,404,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,531,'Courier',0,17,1,1,0,1,160,16,405,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Object", 1, 0, 0, -text('black',672,536,'Courier',0,17,2,1,0,1,60,32,406,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Object", - "Name"])) -]) -], -402,0,0,[ -]). -group([ -polygon('black',13,[ - 624,592,624,608,624,624,640,624,704,624,720,624,720,608,720,592, - 720,576,704,576,640,576,624,576,624,592],0,1,1,0,408,0,0,0,0,0,'1', - "2490",[ -]), -box('black',628,580,716,620,0,1,0,409,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,579,'Courier',0,17,1,1,0,1,160,16,410,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Datatype", 1, 0, 0, -text('black',672,592,'Courier',0,17,1,1,0,1,80,16,411,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Datatype"])) -]) -], -407,0,0,[ -]). -group([ -polygon('black',13,[ - 624,640,624,656,624,672,640,672,704,672,720,672,720,656,720,640, - 720,624,704,624,640,624,624,624,624,640],0,1,1,0,413,0,0,0,0,0,'1', - "2490",[ -]), -box('black',628,628,716,668,0,1,0,414,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,627,'Courier',0,17,1,1,0,1,160,16,415,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Dataspace", 1, 0, 0, -text('black',672,640,'Courier',0,17,1,1,0,1,90,16,416,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataspace"])) -]) -], -412,0,0,[ -]). -group([ -polygon('black',13,[ - 624,688,624,704,624,720,640,720,704,720,720,720,720,704,720,688, - 720,672,704,672,640,672,624,672,624,688],0,1,1,0,418,0,0,0,0,0,'1', - "2490",[ -]), -box('black',628,676,716,716,0,1,0,419,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,675,'Courier',0,17,1,1,0,1,160,16,420,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Attribute", 1, 0, 0, -text('black',672,680,'Courier',0,17,2,1,0,1,90,32,421,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Attribute", - "List"])) -]) -], -417,0,0,[ -]). -group([ -polygon('black',13,[ - 624,736,624,752,624,768,640,768,704,768,720,768,720,752,720,736, - 720,720,704,720,640,720,624,720,624,736],0,1,1,0,423,0,0,0,0,0,'1', - "2490",[ -]), -box('black',628,724,716,764,0,1,0,424,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,723,'Courier',0,17,1,1,0,1,160,16,425,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Misc.", 1, 0, 0, -text('black',672,728,'Courier',0,17,2,1,0,1,80,32,426,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Misc.", - "Metadata"])) -]) -], -422,0,0,[ -]). -rcbox('black',608,512,736,784,0,1,1,0,16,461,0,0,0,0,'1',[ -]). -poly('black',2,[ - 544,448,720,512],1,1,1,463,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 480,496,608,768],1,1,1,465,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). 
-group([ -polygon('black',13,[ - 800,80,800,96,800,112,816,112,880,112,896,112,896,96,896,80, - 896,64,880,64,816,64,800,64,800,80],0,1,1,0,544,0,0,0,0,0,'1', - "2490",[ -]), -box('black',804,68,892,108,0,1,0,545,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',848,67,'Courier',0,17,1,1,0,1,160,16,546,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Group", 1, 0, 0, -text('black',848,72,'Courier',0,17,2,1,0,1,50,32,547,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Group", - "Name"])) -]) -], -543,0,0,[ -]). -group([ -polygon('black',13,[ - 800,128,800,144,800,160,816,160,880,160,896,160,896,144,896,128, - 896,112,880,112,816,112,800,112,800,128],0,1,1,0,559,0,0,0,0,0,'1', - "2490",[ -]), -box('black',804,116,892,156,0,1,0,560,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',848,115,'Courier',0,17,1,1,0,1,160,16,561,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Attribute", 1, 0, 0, -text('black',848,120,'Courier',0,17,2,1,0,1,90,32,562,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Attribute", - "List"])) -]) -], -558,0,0,[ -]). -group([ -polygon('black',13,[ - 800,176,800,192,800,208,816,208,880,208,896,208,896,192,896,176, - 896,160,880,160,816,160,800,160,800,176],0,1,1,0,564,0,0,0,0,0,'1', - "2490",[ -]), -box('black',804,164,892,204,0,1,0,565,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',848,163,'Courier',0,17,1,1,0,1,160,16,566,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Misc.", 1, 0, 0, -text('black',848,168,'Courier',0,17,2,1,0,1,80,32,567,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Misc.", - "Metadata"])) -]) -], -563,0,0,[ -]). -rcbox('black',784,48,912,224,0,1,1,0,16,568,0,0,0,0,'1',[ -]). -poly('black',2,[ - 640,208,784,64],1,1,1,587,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 704,256,896,224],1,1,1,589,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 464,464,464,480,464,496,480,496,544,496,560,496,560,480,560,464, - 560,448,544,448,480,448,464,448,464,464],0,1,1,0,616,0,0,0,0,0,'1', - "2490",[ -]), -box('black',468,452,556,492,0,1,0,617,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',512,451,'Courier',0,17,1,1,0,1,160,16,618,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Object", 1, 0, 0, -text('black',512,456,'Courier',0,17,2,1,0,1,60,32,619,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Object", - "Header"])) -]) -], -615,0,0,[ -]). -group([ -polygon('black',13,[ - 464,512,464,528,464,544,480,544,544,544,560,544,560,528,560,512, - 560,496,544,496,480,496,464,496,464,512],0,1,1,0,621,0,0,0,0,0,'1', - "2490",[ -]), -box('black',468,500,556,540,0,1,0,622,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',512,499,'Courier',0,17,1,1,0,1,160,16,623,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Object", 1, 0, 0, -text('black',512,504,'Courier',0,17,2,1,0,1,60,32,624,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Object", - "Data"])) -]) -], -620,0,0,[ -]). -rcbox('black',448,432,576,560,0,1,1,0,16,625,0,0,0,0,'1',[ -]). -poly('black',2,[ - 800,432,928,624],1,1,1,647,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 896,384,1200,464],1,1,1,649,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). 
-group([ -arc('black',0,1,1,0,1104,560,1120,576,1120,592,1136,576,0,32,32,-5760,5760,658,0,0,8,3,0,0,0,'1','8','3',[ -]), -arc('black',0,1,1,0,1136,560,1152,576,1152,560,1136,576,0,32,32,5760,5760,665,0,2,8,3,0,0,0,'1','8','3',[ -]) -], -672,0,0,[ -]). -group([ -arc('black',0,1,1,0,1104,528,1120,544,1120,528,1136,544,1,32,32,5760,-5760,674,0,0,8,3,0,0,0,'1','8','3',[ -]), -arc('black',0,1,1,0,1136,528,1152,544,1152,560,1136,544,1,32,32,-5760,-5760,675,0,2,8,3,0,0,0,'1','8','3',[ -]) -], -673,0,0,[ -]). -group([ -arc('black',0,1,1,0,1008,560,1024,576,1024,592,1008,576,1,32,32,-5760,-5760,695,0,0,8,3,0,0,0,'1','8','3',[ -]), -arc('black',0,1,1,0,976,560,992,576,992,560,1008,576,1,32,32,5760,-5760,696,0,0,8,3,0,0,0,'1','8','3',[ -]) -], -694,0,0,[ -]). -group([ -arc('black',0,1,1,0,1008,528,1024,544,1024,528,1008,544,0,32,32,5760,5760,698,0,0,8,3,0,0,0,'1','8','3',[ -]), -arc('black',0,1,1,0,976,528,992,544,992,560,1008,544,0,32,32,-5760,5760,699,0,0,8,3,0,0,0,'1','8','3',[ -]) -], -697,0,0,[ -]). -arc('black',0,1,1,0,368,304,400,328,400,352,368,328,1,64,48,-5760,-5760,728,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,304,304,336,328,336,304,368,328,1,64,48,5760,-5760,729,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,368,256,400,280,400,256,368,280,0,64,48,5760,5760,731,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,304,256,336,280,336,304,368,280,0,64,48,-5760,5760,732,0,0,8,3,0,0,0,'1','8','3',[ -]). diff --git a/doc/tgif/IOPipe.obj b/doc/tgif/IOPipe.obj deleted file mode 100644 index 96feec2..0000000 --- a/doc/tgif/IOPipe.obj +++ /dev/null @@ -1,715 +0,0 @@ -%TGIF 3.0-p9 -state(1,33,100.000,0,0,0,16,1,9,1,1,0,2,1,0,1,1,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -poly('black',2,[ - 225,238,145,238],1,1,1,1060,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 32,200,32,276,32,294,43,294,133,294,144,294,144,276,144,200, - 144,182,133,182,43,182,32,182,32,200],0,1,1,0,1062,0,0,0,0,0,'1', - "2490",[ -]), -box('black',35,186,141,290,0,1,0,1063,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',88,185,'Courier',0,17,1,1,0,1,160,16,1064,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Application", 1, 0, 0, -text('black',88,222,'Courier',0,17,2,1,0,1,110,32,1065,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application", - "Buffer"])) -]) -], -1061,0,0,[ -]). -group([ -polygon('black',13,[ - 224,200,224,276,224,294,235,294,325,294,336,294,336,276,336,200, - 336,182,325,182,235,182,224,182,224,200],0,1,1,0,1075,0,0,0,0,0,'1', - "2490",[ -]), -box('black',227,186,333,290,0,1,0,1076,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',280,185,'Courier',0,17,1,1,0,1,160,16,1077,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Conversion", 1, 0, 0, -text('black',280,222,'Courier',0,17,2,1,0,1,100,32,1078,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Conversion", - "Buffer"])) -]) -], -1074,0,0,[ -]). -text('black',376,222,'Courier',0,17,2,1,0,1,60,32,1080,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Gather", - "2"]). -arc('black',0,1,1,0,236,252,280,294,324,294,236,294,1,88,84,0,-11520,1081,0,1,8,3,0,0,0,'1','8','3',[ -]). -text('black',280,294,'Courier',0,17,3,1,0,1,100,48,1082,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Type", - "Conversion", - "3"]). -poly('black',2,[ - 416,238,336,238],1,1,1,1100,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). 
-text('black',472,294,'Courier',0,17,4,1,0,1,100,64,1180,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compute", - "Dataspace", - "Conversion", - "1"]). -poly('black',2,[ - 144,686,224,686],1,1,1,1190,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 32,648,32,724,32,742,44,742,132,742,144,742,144,724,144,648, - 144,630,132,630,44,630,32,630,32,648],0,1,1,0,1192,0,0,0,0,0,'1', - "2490",[ -]), -box('black',34,634,142,738,0,1,0,1193,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',88,633,'Courier',0,17,1,1,0,1,160,16,1194,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Application", 1, 0, 0, -text('black',88,670,'Courier',0,17,2,1,0,1,110,32,1195,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application", - "Buffer"])) -]) -], -1191,0,0,[ -]). -text('black',184,670,'Courier',0,17,2,1,0,1,60,32,1196,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Gather", - "2"]). -group([ -polygon('black',13,[ - 224,648,224,724,224,742,235,742,325,742,336,742,336,724,336,648, - 336,630,325,630,235,630,224,630,224,648],0,1,1,0,1198,0,0,0,0,0,'1', - "2490",[ -]), -box('black',227,634,333,738,0,1,0,1199,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',280,633,'Courier',0,17,1,1,0,1,160,16,1200,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Conversion", 1, 0, 0, -text('black',280,670,'Courier',0,17,2,1,0,1,100,32,1201,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Conversion", - "Buffer"])) -]) -], -1197,0,0,[ -]). -text('black',377,670,'Courier',0,17,2,1,0,1,70,32,1202,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Scatter", - "4"]). -text('black',280,742,'Courier',0,17,3,1,0,1,100,48,1204,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Type", - "Conversion", - "3"]). -poly('black',2,[ - 337,686,417,686],1,1,1,1215,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -text('black',480,742,'Courier',0,17,3,1,0,1,60,48,1224,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Buffer", - "Merge", - "5"]). -arc('black',0,1,1,0,44,700,88,742,44,742,132,742,0,88,84,-11520,11520,1226,0,1,8,3,0,0,0,'1','8','3',[ -]). -text('black',88,742,'Courier',0,17,4,1,0,1,100,64,1227,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compute", - "Dataspace", - "Conversion", - "1"]). -text('black',720,86,'Courier-Bold',1,18,1,1,0,1,143,19,1270,0,15,4,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Read Pipeline"]). -text('black',720,534,'Courier-Bold',1,18,1,1,0,1,154,19,1276,0,15,4,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Write Pipeline"]). -arc('black',0,1,1,0,428,252,472,294,516,294,428,294,1,88,84,0,-11520,1287,0,1,8,3,0,0,0,'1','8','3',[ -]). -text('black',185,222,'Courier',0,17,2,1,0,1,70,32,1293,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Scatter", - "4"]). -arc('black',0,1,1,0,236,700,280,742,236,742,324,742,0,88,84,-11520,11520,1334,0,1,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,436,700,480,742,436,742,524,742,0,88,84,-11520,11520,1336,0,1,8,3,0,0,0,'1','8','3',[ -]). -poly('black',2,[ - 767,686,735,686],3,1,1,1416,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 928,686,896,686],3,1,1,1419,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). 
-group([ -polygon('black',13,[ - 633,654,633,662,633,672,645,672,697,672,709,672,709,662,709,654, - 709,644,697,644,645,644,633,644,633,654],0,1,1,0,1444,0,0,0,0,0,'1', - "2490",[ -]), -box('black',636,647,706,669,0,1,0,1445,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',671,646,'Courier',0,17,1,1,0,1,160,16,1446,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Chunk", 1, 0, 0, -text('black',671,650,'Courier',0,17,1,1,0,1,50,16,1447,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Chunk"])) -]) -], -1443,0,0,[ -]). -group([ -polygon('black',13,[ - 633,710,633,718,633,728,645,728,697,728,709,728,709,718,709,710, - 709,700,697,700,645,700,633,700,633,710],0,1,1,0,1449,0,0,0,0,0,'1', - "2490",[ -]), -box('black',636,703,706,725,0,1,0,1450,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',671,702,'Courier',0,17,1,1,0,1,160,16,1451,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Virtual", 1, 0, 0, -text('black',671,706,'Courier',0,17,1,1,0,1,70,16,1452,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Virtual"])) -]) -], -1448,0,0,[ -]). -poly('black',2,[ - 607,686,735,686],3,1,1,1453,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',607,630,735,742,0,1,1,0,16,1456,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,698,658,710,672,710,658,723,672,1,24,28,5760,-5760,1461,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,723,658,736,672,736,686,723,672,1,26,28,-5760,-5760,1462,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,621,686,634,700,634,714,621,700,1,26,28,-5760,-5760,1464,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,595,686,608,700,608,686,621,700,1,26,28,5760,-5760,1465,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,621,658,634,672,634,658,621,672,0,26,28,5760,5760,1467,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,595,658,608,672,608,686,621,672,0,26,28,-5760,5760,1468,0,2,8,3,0,0,0,'1','8','3',[ -]). -text('black',672,582,'Courier',0,17,2,1,0,1,140,32,1526,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset Layout", - "Management"]). -group([ -polygon('black',13,[ - 794,654,794,662,794,672,806,672,858,672,870,672,870,662,870,654, - 870,644,858,644,806,644,794,644,794,654],0,1,1,0,1531,0,0,0,0,0,'1', - "2490",[ -]), -box('black',797,647,867,669,0,1,0,1532,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',832,646,'Courier',0,17,1,1,0,1,160,16,1533,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Compress", 1, 0, 0, -text('black',832,650,'Courier',0,17,1,1,0,1,80,16,1534,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compress"])) -]) -], -1530,0,0,[ -]). -poly('black',2,[ - 768,686,896,686],3,1,1,1540,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',768,630,896,742,0,1,1,0,16,1541,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,858,658,870,672,870,658,883,672,1,24,28,5760,-5760,1544,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,883,658,896,672,896,686,883,672,1,26,28,-5760,-5760,1545,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,781,658,794,672,794,658,781,672,0,26,28,5760,5760,1548,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,755,658,768,672,768,686,781,672,0,26,28,-5760,5760,1549,0,2,8,3,0,0,0,'1','8','3',[ -]). 
-group([ -polygon('black',13,[ - 1248,200,1248,276,1248,294,1259,294,1349,294,1360,294,1360,276,1360,200, - 1360,182,1349,182,1259,182,1248,182,1248,200],0,1,1,0,1596,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1251,186,1357,290,0,1,0,1597,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1304,185,'Courier',0,17,1,1,0,1,160,16,1598,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Disk", 1, 0, 0, -text('black',1304,230,'Courier',0,17,1,1,0,1,40,16,1599,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Disk"])) -]) -], -1595,0,0,[ -]). -poly('black',2,[ - 608,214,544,214],1,1,1,1600,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -text('black',576,198,'Courier',0,17,2,1,0,1,50,32,1601,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Read", - "Block"]). -poly('black',2,[ - 768,238,736,238],1,1,1,1604,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 928,238,896,238],1,1,1,1605,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 634,206,634,214,634,224,646,224,698,224,710,224,710,214,710,206, - 710,196,698,196,646,196,634,196,634,206],0,1,1,0,1607,0,0,0,0,0,'1', - "2490",[ -]), -box('black',637,199,707,221,0,1,0,1608,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,198,'Courier',0,17,1,1,0,1,160,16,1609,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Chunk", 1, 0, 0, -text('black',672,202,'Courier',0,17,1,1,0,1,50,16,1610,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Chunk"])) -]) -], -1606,0,0,[ -]). -group([ -polygon('black',13,[ - 634,262,634,270,634,280,646,280,698,280,710,280,710,270,710,262, - 710,252,698,252,646,252,634,252,634,262],0,1,1,0,1612,0,0,0,0,0,'1', - "2490",[ -]), -box('black',637,255,707,277,0,1,0,1613,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',672,254,'Courier',0,17,1,1,0,1,160,16,1614,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Virtual", 1, 0, 0, -text('black',672,258,'Courier',0,17,1,1,0,1,70,16,1615,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Virtual"])) -]) -], -1611,0,0,[ -]). -poly('black',2,[ - 608,238,736,238],2,1,1,1616,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',608,182,736,294,0,1,1,0,16,1617,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,698,210,710,224,710,210,723,224,1,24,28,5760,-5760,1620,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,723,210,736,224,736,238,723,224,1,26,28,-5760,-5760,1621,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,621,238,634,252,634,266,621,252,1,26,28,-5760,-5760,1622,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,595,238,608,252,608,238,621,252,1,26,28,5760,-5760,1623,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,621,210,634,224,634,210,621,224,0,26,28,5760,5760,1624,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,595,210,608,224,608,238,621,224,0,26,28,-5760,5760,1625,0,2,8,3,0,0,0,'1','8','3',[ -]). -text('black',672,134,'Courier',0,17,2,1,0,1,140,32,1626,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset Layout", - "Management"]). 
-group([ -polygon('black',13,[ - 794,206,794,214,794,224,806,224,858,224,870,224,870,214,870,206, - 870,196,858,196,806,196,794,196,794,206],0,1,1,0,1628,0,0,0,0,0,'1', - "2490",[ -]), -box('black',797,199,867,221,0,1,0,1629,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',832,198,'Courier',0,17,1,1,0,1,160,16,1630,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Compress", 1, 0, 0, -text('black',832,202,'Courier',0,17,1,1,0,1,80,16,1631,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compress"])) -]) -], -1627,0,0,[ -]). -poly('black',2,[ - 768,238,896,238],2,1,1,1632,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',768,182,896,294,0,1,1,0,16,1633,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,858,210,870,224,870,210,883,224,1,24,28,5760,-5760,1634,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,883,210,896,224,896,238,883,224,1,26,28,-5760,-5760,1635,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,781,210,794,224,794,210,781,224,0,26,28,5760,5760,1636,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,755,210,768,224,768,238,781,224,0,26,28,-5760,5760,1637,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,670,710,710,742,710,710,750,742,1,80,64,5760,-5760,1458,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,702,694,726,742,726,790,750,742,0,48,96,-5760,5760,1459,0,2,8,3,0,0,0,'1','8','3',[ -]). -group([ -polygon('black',13,[ - 614,766,614,798,614,806,625,806,715,806,726,806,726,798,726,766, - 726,758,715,758,625,758,614,758,614,766],0,1,1,0,1684,0,0,0,0,0,'1', - "2490",[ -]), -box('black',617,760,723,804,0,1,0,1685,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',670,760,'Courier',0,17,1,1,0,1,160,16,1686,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Re-Enter", 1, 0, 0, -text('black',670,766,'Courier',0,17,2,1,0,1,80,32,1687,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Re-Enter", - "Library"])) -]) -], -1683,0,0,[ -]). -arc('black',0,1,1,0,662,262,710,295,710,262,758,295,1,96,66,5760,-5760,1618,0,2,8,3,0,0,0,'1','8','3',[ -]). -group([ -polygon('black',13,[ - 1248,648,1248,724,1248,742,1259,742,1349,742,1360,742,1360,724,1360,648, - 1360,630,1349,630,1259,630,1248,630,1248,648],0,1,1,0,1968,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1251,634,1357,738,0,1,0,1969,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1304,633,'Courier',0,17,1,1,0,1,160,16,1970,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Disk", 1, 0, 0, -text('black',1304,678,'Courier',0,17,1,1,0,1,40,16,1971,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Disk"])) -]) -], -1967,0,0,[ -]). -poly('black',2,[ - 1088,686,1056,686],3,1,1,1994,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 954,654,954,662,954,672,966,672,1018,672,1030,672,1030,662,1030,654, - 1030,644,1018,644,966,644,954,644,954,654],0,1,1,0,1996,0,0,0,0,0,'1', - "2490",[ -]), -box('black',957,647,1027,669,0,1,0,1997,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',992,646,'Courier',0,17,1,1,0,1,160,16,1998,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "External", 1, 0, 0, -text('black',992,650,'Courier',0,17,1,1,0,1,80,16,1999,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "External"])) -]) -], -1995,0,0,[ -]). -poly('black',2,[ - 928,686,1056,686],3,1,1,2000,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',928,630,1056,742,0,1,1,0,16,2001,0,0,0,0,'1',[ -]). 
-arc('black',0,1,1,0,1018,658,1030,672,1030,658,1043,672,1,24,28,5760,-5760,2002,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1043,658,1056,672,1056,686,1043,672,1,26,28,-5760,-5760,2003,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,941,658,954,672,954,658,941,672,0,26,28,5760,5760,2004,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,915,658,928,672,928,686,941,672,0,26,28,-5760,5760,2005,0,2,8,3,0,0,0,'1','8','3',[ -]). -group([ -polygon('black',13,[ - 1114,654,1114,662,1114,672,1126,672,1178,672,1190,672,1190,662,1190,654, - 1190,644,1178,644,1126,644,1114,644,1114,654],0,1,1,0,2012,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1117,647,1187,669,0,1,0,2013,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1152,646,'Courier',0,17,1,1,0,1,160,16,2014,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Serial", 1, 0, 0, -text('black',1152,650,'Courier',0,17,1,1,0,1,60,16,2015,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Serial"])) -]) -], -2011,0,0,[ -]). -group([ -polygon('black',13,[ - 1114,710,1114,718,1114,728,1126,728,1178,728,1190,728,1190,718,1190,710, - 1190,700,1178,700,1126,700,1114,700,1114,710],0,1,1,0,2017,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1117,703,1187,725,0,1,0,2018,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1152,702,'Courier',0,17,1,1,0,1,160,16,2019,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Parallel", 1, 0, 0, -text('black',1152,706,'Courier',0,17,1,1,0,1,80,16,2020,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Parallel"])) -]) -], -2016,0,0,[ -]). -rcbox('black',1088,630,1216,742,0,1,1,0,16,2022,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,1178,658,1190,672,1190,658,1203,672,1,24,28,5760,-5760,2023,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1203,658,1216,672,1216,686,1203,672,1,26,28,-5760,-5760,2024,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1101,658,1114,672,1114,658,1101,672,0,26,28,5760,5760,2027,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1075,658,1088,672,1088,686,1101,672,0,26,28,-5760,5760,2028,0,2,8,3,0,0,0,'1','8','3',[ -]). -poly('black',2,[ - 1248,686,1216,686],3,1,1,2029,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -arc('black',0,1,1,0,1101,686,1114,700,1114,714,1101,700,1,26,28,-5760,-5760,2025,0,2,8,3,0,0,0,'1','8','3',[ -]), -arc('black',0,1,1,0,1075,686,1088,700,1088,686,1101,700,1,26,28,5760,-5760,2026,0,2,8,3,0,0,0,'1','8','3',[ -]) -], -2047,0,0,[ -]). -arc('black',0,1,1,0,1203,686,1216,700,1216,686,1203,700,0,26,28,5760,5760,2049,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1177,686,1190,700,1190,714,1203,700,0,26,28,-5760,5760,2050,0,2,8,3,0,0,0,'1','8','3',[ -]). -poly('black',2,[ - 1088,238,1056,238],1,1,1,2077,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -group([ -polygon('black',13,[ - 954,206,954,214,954,224,966,224,1018,224,1030,224,1030,214,1030,206, - 1030,196,1018,196,966,196,954,196,954,206],0,1,1,0,2079,0,0,0,0,0,'1', - "2490",[ -]), -box('black',957,199,1027,221,0,1,0,2080,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',992,198,'Courier',0,17,1,1,0,1,160,16,2081,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "External", 1, 0, 0, -text('black',992,202,'Courier',0,17,1,1,0,1,80,16,2082,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "External"])) -]) -], -2078,0,0,[ -]). -poly('black',2,[ - 928,238,1056,238],0,1,1,2083,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',928,182,1056,294,0,1,1,0,16,2084,0,0,0,0,'1',[ -]). 
-arc('black',0,1,1,0,1018,210,1030,224,1030,210,1043,224,1,24,28,5760,-5760,2085,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1043,210,1056,224,1056,238,1043,224,1,26,28,-5760,-5760,2086,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,941,210,954,224,954,210,941,224,0,26,28,5760,5760,2087,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,915,210,928,224,928,238,941,224,0,26,28,-5760,5760,2088,0,2,8,3,0,0,0,'1','8','3',[ -]). -group([ -polygon('black',13,[ - 1114,206,1114,214,1114,224,1126,224,1178,224,1190,224,1190,214,1190,206, - 1190,196,1178,196,1126,196,1114,196,1114,206],0,1,1,0,2090,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1117,199,1187,221,0,1,0,2091,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1152,198,'Courier',0,17,1,1,0,1,160,16,2092,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Serial", 1, 0, 0, -text('black',1152,202,'Courier',0,17,1,1,0,1,60,16,2093,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Serial"])) -]) -], -2089,0,0,[ -]). -group([ -polygon('black',13,[ - 1114,262,1114,270,1114,280,1126,280,1178,280,1190,280,1190,270,1190,262, - 1190,252,1178,252,1126,252,1114,252,1114,262],0,1,1,0,2095,0,0,0,0,0,'1', - "2490",[ -]), -box('black',1117,255,1187,277,0,1,0,2096,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1152,254,'Courier',0,17,1,1,0,1,160,16,2097,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Parallel", 1, 0, 0, -text('black',1152,258,'Courier',0,17,1,1,0,1,80,16,2098,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Parallel"])) -]) -], -2094,0,0,[ -]). -rcbox('black',1088,182,1216,294,0,1,1,0,16,2099,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,1178,210,1190,224,1190,210,1203,224,1,24,28,5760,-5760,2100,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1203,210,1216,224,1216,238,1203,224,1,26,28,-5760,-5760,2101,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1101,210,1114,224,1114,210,1101,224,0,26,28,5760,5760,2102,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1075,210,1088,224,1088,238,1101,224,0,26,28,-5760,5760,2103,0,2,8,3,0,0,0,'1','8','3',[ -]). -poly('black',2,[ - 1248,238,1216,238],1,1,1,2104,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -arc('black',0,1,1,0,1101,238,1114,252,1114,266,1101,252,1,26,28,-5760,-5760,2106,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1075,238,1088,252,1088,238,1101,252,1,26,28,5760,-5760,2107,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1203,238,1216,252,1216,238,1203,252,0,26,28,5760,5760,2108,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,1177,238,1190,252,1190,266,1203,252,0,26,28,-5760,5760,2109,0,2,8,3,0,0,0,'1','8','3',[ -]). -group([ -poly('black',2,[ - 607,662,543,662],1,1,1,1210,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]), -text('black',575,646,'Courier',0,17,2,1,0,1,50,32,1211,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Read", - "Block"]), -poly('black',2,[ - 543,710,607,710],1,1,1,1221,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]), -text('black',575,694,'Courier',0,17,2,1,0,1,50,32,1222,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Write", - "Block"]) -], -2130,0,0,[ -]). -text('black',1152,582,'Courier',0,17,2,1,0,1,100,32,2131,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "I/O", - "Management"]). -text('black',1152,134,'Courier',0,17,2,1,0,1,100,32,2135,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "I/O", - "Management"]). 
-group([ -polygon('black',13,[ - 442,654,442,662,442,672,454,672,506,672,518,672,518,662,518,654, - 518,644,506,644,454,644,442,644,442,654],0,1,1,0,2137,0,0,0,0,0,'1', - "2490",[ -]), -box('black',445,647,515,669,0,1,0,2138,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',480,646,'Courier',0,17,1,1,0,1,160,16,2139,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Cache", 1, 0, 0, -text('black',480,650,'Courier',0,17,1,1,0,1,50,16,2140,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Cache"])) -]) -], -2136,0,0,[ -]). -poly('black',2,[ - 416,686,544,686],3,1,1,2141,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',416,630,544,742,0,1,1,0,16,2142,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,506,658,518,672,518,658,531,672,1,24,28,5760,-5760,2143,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,531,658,544,672,544,686,531,672,1,26,28,-5760,-5760,2144,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,429,658,442,672,442,658,429,672,0,26,28,5760,5760,2145,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,403,658,416,672,416,686,429,672,0,26,28,-5760,5760,2146,0,2,8,3,0,0,0,'1','8','3',[ -]). -group([ -polygon('black',13,[ - 442,206,442,214,442,224,454,224,506,224,518,224,518,214,518,206, - 518,196,506,196,454,196,442,196,442,206],0,1,1,0,2163,0,0,0,0,0,'1', - "2490",[ -]), -box('black',445,199,515,221,0,1,0,2164,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',480,198,'Courier',0,17,1,1,0,1,160,16,2165,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Cache", 1, 0, 0, -text('black',480,202,'Courier',0,17,1,1,0,1,50,16,2166,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Cache"])) -]) -], -2162,0,0,[ -]). -poly('black',2,[ - 416,238,544,238],0,1,1,2167,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -rcbox('black',416,182,544,294,0,1,1,0,16,2168,0,0,0,0,'1',[ -]). -arc('black',0,1,1,0,506,210,518,224,518,210,531,224,1,24,28,5760,-5760,2169,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,531,210,544,224,544,238,531,224,1,26,28,-5760,-5760,2170,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,429,210,442,224,442,210,429,224,0,26,28,5760,5760,2171,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,403,210,416,224,416,238,429,224,0,26,28,-5760,5760,2172,0,2,8,3,0,0,0,'1','8','3',[ -]). -text('black',480,582,'Courier',0,17,2,1,0,1,110,32,2177,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Performance", - "Management"]). -text('black',480,134,'Courier',0,17,2,1,0,1,110,32,2181,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Performance", - "Management"]). -text('black',832,582,'Courier',0,17,2,1,0,1,110,32,2209,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compression", - "Management"]). -text('black',992,582,'Courier',0,17,2,1,0,1,130,32,2213,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data Location", - "Management"]). -text('black',832,134,'Courier',0,17,2,1,0,1,110,32,2215,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compression", - "Management"]). -text('black',992,134,'Courier',0,17,2,1,0,1,130,32,2216,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data Location", - "Management"]). -arc('black',0,1,1,0,566,685,606,717,606,685,566,717,0,80,64,5760,5760,2229,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,565,658,614,724,614,790,565,724,1,98,132,-5760,-5760,2230,0,2,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,710,247,734,295,734,343,758,295,0,48,96,-5760,5760,2250,0,0,8,3,0,0,0,'1','8','3',[ -]). 
-group([ -polygon('black',13,[ - 622,319,622,351,622,359,633,359,723,359,734,359,734,351,734,319, - 734,311,723,311,633,311,622,311,622,319],0,1,1,0,2252,0,0,0,0,0,'1', - "2490",[ -]), -box('black',625,313,731,357,0,1,0,2253,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',678,313,'Courier',0,17,1,1,0,1,160,16,2254,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Re-Enter", 1, 0, 0, -text('black',678,319,'Courier',0,17,2,1,0,1,80,32,2255,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Re-Enter", - "Library"])) -]) -], -2251,0,0,[ -]). -arc('black',0,1,1,0,574,238,614,274,614,238,574,274,0,80,72,5760,5760,2256,0,0,8,3,0,0,0,'1','8','3',[ -]). -arc('black',0,1,1,0,574,203,622,273,622,343,574,273,1,96,140,-5760,-5760,2257,0,2,8,3,0,0,0,'1','8','3',[ -]). diff --git a/doc/tgif/RobbPipe.obj b/doc/tgif/RobbPipe.obj deleted file mode 100644 index fa8c2c3..0000000 --- a/doc/tgif/RobbPipe.obj +++ /dev/null @@ -1,136 +0,0 @@ -%TGIF 3.0-p5 -state(0,33,100,0,384,0,16,1,9,1,1,0,2,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). -text('black',80,80,'Courier',0,17,2,1,0,1,77,28,32,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application", - "Buffer"]). -rcbox('black',32,64,128,128,0,1,1,0,16,34,0,0,0,0,'1',[ -]). -rcbox('black',192,64,288,128,0,1,1,0,16,37,0,0,0,0,'1',[ -]). -text('black',240,208,'Courier',0,17,2,1,0,1,49,28,38,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Scratch", - "Buffer"]). -text('black',240,80,'Courier',0,17,2,1,0,1,70,28,41,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Background", - "Buffer"]). -rcbox('black',192,192,288,256,0,1,1,0,16,43,0,0,0,0,'1',[ -]). -text('black',416,80,'Courier',0,17,3,1,0,1,63,42,53,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Array", - "I/O", - "Interface"]). -box('black',384,64,448,320,0,1,1,55,0,0,0,0,0,'1',[ -]). -text('black',512,112,'Courier',0,17,1,1,0,1,77,14,56,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compression"]). -text('black',512,160,'Courier',0,17,1,1,0,1,56,14,58,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "External"]). -text('black',512,208,'Courier',0,17,1,1,0,1,56,14,60,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Chunking"]). -text('black',512,256,'Courier',0,17,1,1,0,1,105,14,62,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Serial/Parallel"]). -poly('black',2,[ - 384,224,288,224],1,1,1,66,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,80,192,80],1,1,1,67,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',3,[ - 272,192,240,160,208,192],1,1,1,68,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 240,128,240,160],1,1,1,69,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',4,[ - 192,224,160,208,160,128,128,112],1,1,1,70,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -text('black',336,208,'Courier',0,17,1,1,0,1,14,14,73,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1a"]). -text('black',160,64,'Courier',0,17,1,1,0,1,14,14,75,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1b"]). -text('black',240,160,'Courier',0,17,1,1,0,1,7,14,79,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "2"]). -text('black',144,160,'Courier',0,17,1,1,0,1,7,14,94,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "3"]). -text('black',64,368,'Courier',0,17,1,0,0,1,476,14,98,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1a & 1b: First phase of data space conversion. (1b is often omitted)"]). -text('black',64,400,'Courier',0,17,1,0,0,1,161,14,100,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "2: Data type conversion"]). 
-text('black',64,432,'Courier',0,17,1,0,0,1,280,14,104,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "3: Second phase of data space conversion"]). -text('black',240,16,'Courier',0,17,1,1,0,1,175,14,108,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "R E A D P I P E L I N E"]). -text('black',80,528,'Courier',0,17,2,1,0,1,77,28,110,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Application", - "Buffer"]). -text('black',240,528,'Courier',0,17,2,1,0,1,70,28,114,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Background", - "Buffer"]). -text('black',240,656,'Courier',0,17,2,1,0,1,49,28,116,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Scratch", - "Buffer"]). -text('black',416,528,'Courier',0,17,3,1,0,1,63,42,120,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Array", - "I/O", - "Interface"]). -text('black',512,560,'Courier',0,17,1,1,0,1,77,14,122,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Compression"]). -text('black',512,608,'Courier',0,17,1,1,0,1,56,14,124,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "External"]). -text('black',512,672,'Courier',0,17,1,1,0,1,56,14,126,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Chunking"]). -text('black',512,720,'Courier',0,17,1,1,0,1,105,14,130,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Serial/Parallel"]). -rcbox('black',32,512,128,576,0,1,1,0,16,132,0,0,0,0,'1',[ -]). -rcbox('black',192,512,288,576,0,1,1,0,16,133,0,0,0,0,'1',[ -]). -rcbox('black',192,640,288,704,0,1,1,0,16,134,0,0,0,0,'1',[ -]). -box('black',384,512,448,768,0,1,1,135,0,0,0,0,0,'1',[ -]). -poly('black',4,[ - 128,544,160,576,160,656,192,672],1,1,1,137,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 384,544,288,544],1,1,1,143,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',3,[ - 208,640,240,608,272,640],1,1,1,144,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 240,576,240,608],1,1,1,145,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -poly('black',2,[ - 288,672,384,672],1,1,1,146,2,0,0,0,8,3,0,0,0,'1','8','3', - "",[ -]). -text('black',128,608,'Courier',0,17,1,1,0,1,14,14,149,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1a"]). -text('black',336,528,'Courier',0,17,1,1,0,1,14,14,151,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1b"]). -text('black',240,608,'Courier',0,17,1,1,0,1,7,14,155,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "2"]). -text('black',336,656,'Courier',0,17,1,1,0,1,7,14,159,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "3"]). -text('black',64,832,'Courier',0,17,1,0,0,1,469,14,163,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "1a & 1b: First phase of data space conversion (1b is often omitted)"]). -text('black',64,864,'Courier',0,17,1,0,0,1,161,14,167,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "2: Data type conversion"]). -text('black',64,896,'Courier',0,17,1,0,0,1,280,14,171,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "3: Second phase of data space conversion"]). diff --git a/doc/tgif/UserView.obj b/doc/tgif/UserView.obj deleted file mode 100644 index 1a1907f..0000000 --- a/doc/tgif/UserView.obj +++ /dev/null @@ -1,1203 +0,0 @@ -%TGIF 3.0-p9 -state(0,33,100.000,0,448,0,16,1,9,1,1,0,0,1,0,1,1,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). -% -% @(#)$Header$ -% %W% -% -unit("1 pixel/pixel"). -page(1,"",1). 
-group([ -polygon('black',13,[ - 32,281,32,1351,32,1376,48,1376,112,1376,128,1376,128,1351,128,281, - 128,256,112,256,48,256,32,256,32,281],0,1,1,0,226,0,0,0,0,0,'1', - "2490",[ -]), -box('black',36,262,124,1370,0,1,0,227,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',80,261,'Courier',0,24,1,1,0,1,224,24,228,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "HDF5", 1, 0, 0, -text('black',80,780,'Courier',0,24,3,1,0,1,56,72,229,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "HDF5", - "File", - "'/'"])) -]) -], -230,0,0,[ -]). -group([ -polygon('black',13,[ - 208,130,208,207,208,208,224,208,288,208,304,208,304,207,304,130, - 304,128,288,128,224,128,208,128,208,130],0,1,1,0,239,0,0,0,0,0,'1', - "2490",[ -]), -box('black',212,129,300,208,0,1,0,240,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',256,129,'Courier',0,24,1,1,0,1,224,24,241,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Directory", 1, 0, 0, -text('black',256,152,'Courier',0,17,2,1,0,1,90,32,242,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Directory", - "'sol'"])) -]) -], -238,0,0,[ -]). -group([ -polygon('black',13,[ - 208,402,208,494,208,496,224,496,288,496,304,496,304,494,304,402, - 304,400,288,400,224,400,208,400,208,402],0,1,1,0,295,0,0,0,0,0,'1', - "2490",[ -]), -box('black',212,401,300,495,0,1,0,296,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',256,401,'Courier',0,24,1,1,0,1,224,24,297,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Directory", 1, 0, 0, -text('black',256,432,'Courier',0,17,2,1,0,1,90,32,298,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Directory", - "'mercury'"])) -]) -], -294,0,0,[ -]). -group([ -polygon('black',13,[ - 208,547,208,701,208,704,224,704,288,704,304,704,304,701,304,547, - 304,544,288,544,224,544,208,544,208,547],0,1,1,0,300,0,0,0,0,0,'1', - "2490",[ -]), -box('black',212,545,300,703,0,1,0,301,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',256,545,'Courier',0,24,1,1,0,1,224,24,302,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Directory", 1, 0, 0, -text('black',256,608,'Courier',0,17,2,1,0,1,90,32,303,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Directory", - "'venus'"])) -]) -], -299,0,0,[ -]). -group([ -polygon('black',13,[ - 208,755,208,909,208,912,224,912,288,912,304,912,304,909,304,755, - 304,752,288,752,224,752,208,752,208,755],0,1,1,0,305,0,0,0,0,0,'1', - "2490",[ -]), -box('black',212,753,300,911,0,1,0,306,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',256,753,'Courier',0,24,1,1,0,1,224,24,307,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Directory", 1, 0, 0, -text('black',256,816,'Courier',0,17,2,1,0,1,90,32,308,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Directory", - "'earth'"])) -]) -], -304,0,0,[ -]). -poly('black',2,[ - 128,448,208,448],1,1,1,315,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,304,208,176],1,1,1,317,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,608,208,624],1,1,1,318,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,832,208,832],1,1,1,319,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). 
-group([ -group([ -polygon('black',13,[ - 352,77,352,115,352,128,424,128,856,128,928,128,928,115,928,77, - 928,64,856,64,424,64,352,64,352,77],0,1,1,0,22,0,0,0,0,0,'1', - "2490",[ -]), -box('black',370,67,910,125,0,1,0,23,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',640,66,'Courier',0,17,1,1,0,1,160,16,24,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',640,88,'Courier',0,17,1,1,0,1,0,16,25,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -26,0,0,[ -]), -group([ -polygon('black',13,[ - 592,77,592,115,592,128,606,128,690,128,704,128,704,115,704,77, - 704,64,690,64,606,64,592,64,592,77],0,1,1,0,48,0,0,0,0,0,'1', - "2490",[ -]), -box('black',596,67,701,125,0,1,0,49,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',648,66,'Courier',0,17,1,1,0,1,160,16,50,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',648,88,'Courier',0,17,1,1,0,1,90,16,51,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -47,0,0,[ -]), -group([ -polygon('black',13,[ - 480,77,480,115,480,128,494,128,578,128,592,128,592,115,592,77, - 592,64,578,64,494,64,480,64,480,77],0,1,1,0,60,0,0,0,0,0,'1', - "2490",[ -]), -box('black',484,67,589,125,0,1,0,61,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',536,66,'Courier',0,17,1,1,0,1,160,16,62,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',536,88,'Courier',0,17,1,1,0,1,100,16,63,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -59,0,0,[ -]), -group([ -polygon('black',13,[ - 816,74,816,118,816,128,826,128,917,128,928,128,928,118,928,74, - 928,64,917,64,826,64,816,64,816,74],0,1,1,0,82,0,0,0,0,0,'1', - "2490",[ -]), -box('black',819,66,925,126,0,1,0,83,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',872,66,'Courier',0,17,1,1,0,1,160,16,84,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',872,80,'Courier',0,17,2,1,0,1,90,32,85,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -86,0,0,[ -]), -group([ -polygon('black',13,[ - 704,74,704,118,704,128,714,128,805,128,816,128,816,118,816,74, - 816,64,805,64,714,64,704,64,704,74],0,1,1,0,162,0,0,0,0,0,'1', - "2490",[ -]), -box('black',707,66,813,126,0,1,0,163,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',760,66,'Courier',0,17,1,1,0,1,160,16,164,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',760,80,'Courier',0,17,2,1,0,1,70,32,165,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -161,0,0,[ -]), -text('black',416,80,'Courier',0,24,2,1,0,1,98,48,181,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'temp'"]) -], -435,0,0,[ -]). 
-group([ -group([ -polygon('black',13,[ - 352,173,352,211,352,224,424,224,856,224,928,224,928,211,928,173, - 928,160,856,160,424,160,352,160,352,173],0,1,1,0,438,0,0,0,0,0,'1', - "2490",[ -]), -box('black',370,163,910,221,0,1,0,439,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',640,162,'Courier',0,17,1,1,0,1,160,16,440,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',640,184,'Courier',0,17,1,1,0,1,0,16,441,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -437,0,0,[ -]), -group([ -polygon('black',13,[ - 592,173,592,211,592,224,606,224,690,224,704,224,704,211,704,173, - 704,160,690,160,606,160,592,160,592,173],0,1,1,0,443,0,0,0,0,0,'1', - "2490",[ -]), -box('black',596,163,701,221,0,1,0,444,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',648,162,'Courier',0,17,1,1,0,1,160,16,445,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',648,184,'Courier',0,17,1,1,0,1,90,16,446,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -442,0,0,[ -]), -group([ -polygon('black',13,[ - 480,173,480,211,480,224,494,224,578,224,592,224,592,211,592,173, - 592,160,578,160,494,160,480,160,480,173],0,1,1,0,448,0,0,0,0,0,'1', - "2490",[ -]), -box('black',484,163,589,221,0,1,0,449,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',536,162,'Courier',0,17,1,1,0,1,160,16,450,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',536,184,'Courier',0,17,1,1,0,1,100,16,451,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -447,0,0,[ -]), -group([ -polygon('black',13,[ - 816,170,816,214,816,224,826,224,917,224,928,224,928,214,928,170, - 928,160,917,160,826,160,816,160,816,170],0,1,1,0,453,0,0,0,0,0,'1', - "2490",[ -]), -box('black',819,162,925,222,0,1,0,454,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',872,162,'Courier',0,17,1,1,0,1,160,16,455,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',872,176,'Courier',0,17,2,1,0,1,90,32,456,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -452,0,0,[ -]), -group([ -polygon('black',13,[ - 704,170,704,214,704,224,714,224,805,224,816,224,816,214,816,170, - 816,160,805,160,714,160,704,160,704,170],0,1,1,0,458,0,0,0,0,0,'1', - "2490",[ -]), -box('black',707,162,813,222,0,1,0,459,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',760,162,'Courier',0,17,1,1,0,1,160,16,460,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',760,176,'Courier',0,17,2,1,0,1,70,32,461,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -457,0,0,[ -]), -text('black',416,176,'Courier',0,24,2,1,0,1,98,48,462,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'time'"]) -], -436,0,0,[ -]). 
-group([ -group([ -polygon('black',13,[ - 352,269,352,307,352,320,424,320,856,320,928,320,928,307,928,269, - 928,256,856,256,424,256,352,256,352,269],0,1,1,0,465,0,0,0,0,0,'1', - "2490",[ -]), -box('black',370,259,910,317,0,1,0,466,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',640,258,'Courier',0,17,1,1,0,1,160,16,467,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',640,280,'Courier',0,17,1,1,0,1,0,16,468,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -464,0,0,[ -]), -group([ -polygon('black',13,[ - 592,269,592,307,592,320,606,320,690,320,704,320,704,307,704,269, - 704,256,690,256,606,256,592,256,592,269],0,1,1,0,470,0,0,0,0,0,'1', - "2490",[ -]), -box('black',596,259,701,317,0,1,0,471,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',648,258,'Courier',0,17,1,1,0,1,160,16,472,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',648,280,'Courier',0,17,1,1,0,1,90,16,473,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -469,0,0,[ -]), -group([ -polygon('black',13,[ - 480,269,480,307,480,320,494,320,578,320,592,320,592,307,592,269, - 592,256,578,256,494,256,480,256,480,269],0,1,1,0,475,0,0,0,0,0,'1', - "2490",[ -]), -box('black',484,259,589,317,0,1,0,476,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',536,258,'Courier',0,17,1,1,0,1,160,16,477,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',536,280,'Courier',0,17,1,1,0,1,100,16,478,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -474,0,0,[ -]), -group([ -polygon('black',13,[ - 816,266,816,310,816,320,826,320,917,320,928,320,928,310,928,266, - 928,256,917,256,826,256,816,256,816,266],0,1,1,0,480,0,0,0,0,0,'1', - "2490",[ -]), -box('black',819,258,925,318,0,1,0,481,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',872,258,'Courier',0,17,1,1,0,1,160,16,482,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',872,272,'Courier',0,17,2,1,0,1,90,32,483,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -479,0,0,[ -]), -group([ -polygon('black',13,[ - 704,266,704,310,704,320,714,320,805,320,816,320,816,310,816,266, - 816,256,805,256,714,256,704,256,704,266],0,1,1,0,485,0,0,0,0,0,'1', - "2490",[ -]), -box('black',707,258,813,318,0,1,0,486,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',760,258,'Courier',0,17,1,1,0,1,160,16,487,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',760,272,'Courier',0,17,2,1,0,1,70,32,488,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -484,0,0,[ -]), -text('black',416,272,'Courier',0,24,2,1,0,1,98,48,489,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'size'"]) -], -463,0,0,[ -]). 
-group([ -polygon('black',13,[ - 336,433,336,495,336,496,352,496,416,496,432,496,432,495,432,433, - 432,432,416,432,352,432,336,432,336,433],0,1,1,0,491,0,0,0,0,0,'1', - "2490",[ -]), -box('black',340,433,428,495,0,1,0,492,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',384,433,'Courier',0,24,1,1,0,1,224,24,493,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Directory", 1, 0, 0, -text('black',384,448,'Courier',0,17,2,1,0,1,90,32,494,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Directory", - "'moons'"])) -]) -], -490,0,0,[ -]). -group([ -polygon('black',13,[ - 352,753,352,815,352,816,368,816,432,816,448,816,448,815,448,753, - 448,752,432,752,368,752,352,752,352,753],0,1,1,0,511,0,0,0,0,0,'1', - "2490",[ -]), -box('black',356,753,444,815,0,1,0,512,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',400,753,'Courier',0,24,1,1,0,1,224,24,513,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Directory", 1, 0, 0, -text('black',400,768,'Courier',0,17,2,1,0,1,90,32,514,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Directory", - "'planet'"])) -]) -], -510,0,0,[ -]). -group([ -polygon('black',13,[ - 352,881,352,943,352,944,368,944,432,944,448,944,448,943,448,881, - 448,880,432,880,368,880,352,880,352,881],0,1,1,0,516,0,0,0,0,0,'1', - "2490",[ -]), -box('black',356,881,444,943,0,1,0,517,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',400,881,'Courier',0,24,1,1,0,1,224,24,518,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Directory", 1, 0, 0, -text('black',400,896,'Courier',0,17,2,1,0,1,90,32,519,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Directory", - "'moons'"])) -]) -], -515,0,0,[ -]). -poly('black',2,[ - 304,160,352,96],1,1,1,528,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 304,160,352,192],1,1,1,529,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 304,160,352,288],1,1,1,530,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 304,448,336,464],1,1,1,532,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 304,832,352,784],1,1,1,533,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 304,832,352,912],1,1,1,534,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 304,448,352,384],1,1,1,535,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). 
-group([ -group([ -polygon('black',13,[ - 352,365,352,403,352,416,424,416,856,416,928,416,928,403,928,365, - 928,352,856,352,424,352,352,352,352,365],0,1,1,0,538,0,0,0,0,0,'1', - "2490",[ -]), -box('black',370,355,910,413,0,1,0,539,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',640,354,'Courier',0,17,1,1,0,1,160,16,540,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',640,376,'Courier',0,17,1,1,0,1,0,16,541,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -537,0,0,[ -]), -group([ -polygon('black',13,[ - 592,365,592,403,592,416,606,416,690,416,704,416,704,403,704,365, - 704,352,690,352,606,352,592,352,592,365],0,1,1,0,543,0,0,0,0,0,'1', - "2490",[ -]), -box('black',596,355,701,413,0,1,0,544,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',648,354,'Courier',0,17,1,1,0,1,160,16,545,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',648,376,'Courier',0,17,1,1,0,1,90,16,546,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -542,0,0,[ -]), -group([ -polygon('black',13,[ - 480,365,480,403,480,416,494,416,578,416,592,416,592,403,592,365, - 592,352,578,352,494,352,480,352,480,365],0,1,1,0,548,0,0,0,0,0,'1', - "2490",[ -]), -box('black',484,355,589,413,0,1,0,549,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',536,354,'Courier',0,17,1,1,0,1,160,16,550,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',536,376,'Courier',0,17,1,1,0,1,100,16,551,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -547,0,0,[ -]), -group([ -polygon('black',13,[ - 816,362,816,406,816,416,826,416,917,416,928,416,928,406,928,362, - 928,352,917,352,826,352,816,352,816,362],0,1,1,0,553,0,0,0,0,0,'1', - "2490",[ -]), -box('black',819,354,925,414,0,1,0,554,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',872,354,'Courier',0,17,1,1,0,1,160,16,555,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',872,368,'Courier',0,17,2,1,0,1,90,32,556,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -552,0,0,[ -]), -group([ -polygon('black',13,[ - 704,362,704,406,704,416,714,416,805,416,816,416,816,406,816,362, - 816,352,805,352,714,352,704,352,704,362],0,1,1,0,558,0,0,0,0,0,'1', - "2490",[ -]), -box('black',707,354,813,414,0,1,0,559,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',760,354,'Courier',0,17,1,1,0,1,160,16,560,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',760,368,'Courier',0,17,2,1,0,1,70,32,561,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -557,0,0,[ -]), -text('black',416,368,'Courier',0,24,2,1,0,1,112,48,562,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'volume'"]) -], -536,0,0,[ -]). 
-group([ -group([ -polygon('black',13,[ - 480,701,480,739,480,752,552,752,984,752,1056,752,1056,739,1056,701, - 1056,688,984,688,552,688,480,688,480,701],0,1,1,0,565,0,0,0,0,0,'1', - "2490",[ -]), -box('black',498,691,1038,749,0,1,0,566,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',768,690,'Courier',0,17,1,1,0,1,160,16,567,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',768,712,'Courier',0,17,1,1,0,1,0,16,568,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -564,0,0,[ -]), -group([ -polygon('black',13,[ - 720,701,720,739,720,752,734,752,818,752,832,752,832,739,832,701, - 832,688,818,688,734,688,720,688,720,701],0,1,1,0,570,0,0,0,0,0,'1', - "2490",[ -]), -box('black',724,691,829,749,0,1,0,571,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',776,690,'Courier',0,17,1,1,0,1,160,16,572,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',776,712,'Courier',0,17,1,1,0,1,90,16,573,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -569,0,0,[ -]), -group([ -polygon('black',13,[ - 608,701,608,739,608,752,622,752,706,752,720,752,720,739,720,701, - 720,688,706,688,622,688,608,688,608,701],0,1,1,0,575,0,0,0,0,0,'1', - "2490",[ -]), -box('black',612,691,717,749,0,1,0,576,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',664,690,'Courier',0,17,1,1,0,1,160,16,577,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',664,712,'Courier',0,17,1,1,0,1,100,16,578,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -574,0,0,[ -]), -group([ -polygon('black',13,[ - 944,698,944,742,944,752,954,752,1045,752,1056,752,1056,742,1056,698, - 1056,688,1045,688,954,688,944,688,944,698],0,1,1,0,580,0,0,0,0,0,'1', - "2490",[ -]), -box('black',947,690,1053,750,0,1,0,581,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1000,690,'Courier',0,17,1,1,0,1,160,16,582,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',1000,704,'Courier',0,17,2,1,0,1,90,32,583,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -579,0,0,[ -]), -group([ -polygon('black',13,[ - 832,698,832,742,832,752,842,752,933,752,944,752,944,742,944,698, - 944,688,933,688,842,688,832,688,832,698],0,1,1,0,585,0,0,0,0,0,'1', - "2490",[ -]), -box('black',835,690,941,750,0,1,0,586,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',888,690,'Courier',0,17,1,1,0,1,160,16,587,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',888,704,'Courier',0,17,2,1,0,1,70,32,588,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -584,0,0,[ -]), -text('black',544,704,'Courier',0,24,2,1,0,1,98,48,589,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'temp'"]) -], -563,0,0,[ -]). 
-group([ -group([ -polygon('black',13,[ - 480,781,480,819,480,832,552,832,984,832,1056,832,1056,819,1056,781, - 1056,768,984,768,552,768,480,768,480,781],0,1,1,0,592,0,0,0,0,0,'1', - "2490",[ -]), -box('black',498,771,1038,829,0,1,0,593,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',768,770,'Courier',0,17,1,1,0,1,160,16,594,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',768,792,'Courier',0,17,1,1,0,1,0,16,595,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -591,0,0,[ -]), -group([ -polygon('black',13,[ - 720,781,720,819,720,832,734,832,818,832,832,832,832,819,832,781, - 832,768,818,768,734,768,720,768,720,781],0,1,1,0,597,0,0,0,0,0,'1', - "2490",[ -]), -box('black',724,771,829,829,0,1,0,598,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',776,770,'Courier',0,17,1,1,0,1,160,16,599,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',776,792,'Courier',0,17,1,1,0,1,90,16,600,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -596,0,0,[ -]), -group([ -polygon('black',13,[ - 608,781,608,819,608,832,622,832,706,832,720,832,720,819,720,781, - 720,768,706,768,622,768,608,768,608,781],0,1,1,0,602,0,0,0,0,0,'1', - "2490",[ -]), -box('black',612,771,717,829,0,1,0,603,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',664,770,'Courier',0,17,1,1,0,1,160,16,604,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',664,792,'Courier',0,17,1,1,0,1,100,16,605,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -601,0,0,[ -]), -group([ -polygon('black',13,[ - 944,778,944,822,944,832,954,832,1045,832,1056,832,1056,822,1056,778, - 1056,768,1045,768,954,768,944,768,944,778],0,1,1,0,607,0,0,0,0,0,'1', - "2490",[ -]), -box('black',947,770,1053,830,0,1,0,608,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1000,770,'Courier',0,17,1,1,0,1,160,16,609,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',1000,784,'Courier',0,17,2,1,0,1,90,32,610,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -606,0,0,[ -]), -group([ -polygon('black',13,[ - 832,778,832,822,832,832,842,832,933,832,944,832,944,822,944,778, - 944,768,933,768,842,768,832,768,832,778],0,1,1,0,612,0,0,0,0,0,'1', - "2490",[ -]), -box('black',835,770,941,830,0,1,0,613,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',888,770,'Courier',0,17,1,1,0,1,160,16,614,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',888,784,'Courier',0,17,2,1,0,1,70,32,615,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -611,0,0,[ -]), -text('black',544,784,'Courier',0,24,2,1,0,1,98,48,616,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'size'"]) -], -590,0,0,[ -]). 
-group([ -group([ -polygon('black',13,[ - 480,909,480,947,480,960,552,960,984,960,1056,960,1056,947,1056,909, - 1056,896,984,896,552,896,480,896,480,909],0,1,1,0,619,0,0,0,0,0,'1', - "2490",[ -]), -box('black',498,899,1038,957,0,1,0,620,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',768,898,'Courier',0,17,1,1,0,1,160,16,621,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',768,920,'Courier',0,17,1,1,0,1,0,16,622,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -618,0,0,[ -]), -group([ -polygon('black',13,[ - 720,909,720,947,720,960,734,960,818,960,832,960,832,947,832,909, - 832,896,818,896,734,896,720,896,720,909],0,1,1,0,624,0,0,0,0,0,'1', - "2490",[ -]), -box('black',724,899,829,957,0,1,0,625,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',776,898,'Courier',0,17,1,1,0,1,160,16,626,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',776,920,'Courier',0,17,1,1,0,1,90,16,627,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -623,0,0,[ -]), -group([ -polygon('black',13,[ - 608,909,608,947,608,960,622,960,706,960,720,960,720,947,720,909, - 720,896,706,896,622,896,608,896,608,909],0,1,1,0,629,0,0,0,0,0,'1', - "2490",[ -]), -box('black',612,899,717,957,0,1,0,630,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',664,898,'Courier',0,17,1,1,0,1,160,16,631,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',664,920,'Courier',0,17,1,1,0,1,100,16,632,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -628,0,0,[ -]), -group([ -polygon('black',13,[ - 944,906,944,950,944,960,954,960,1045,960,1056,960,1056,950,1056,906, - 1056,896,1045,896,954,896,944,896,944,906],0,1,1,0,634,0,0,0,0,0,'1', - "2490",[ -]), -box('black',947,898,1053,958,0,1,0,635,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',1000,898,'Courier',0,17,1,1,0,1,160,16,636,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',1000,912,'Courier',0,17,2,1,0,1,90,32,637,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -633,0,0,[ -]), -group([ -polygon('black',13,[ - 832,906,832,950,832,960,842,960,933,960,944,960,944,950,944,906, - 944,896,933,896,842,896,832,896,832,906],0,1,1,0,639,0,0,0,0,0,'1', - "2490",[ -]), -box('black',835,898,941,958,0,1,0,640,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',888,898,'Courier',0,17,1,1,0,1,160,16,641,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',888,912,'Courier',0,17,2,1,0,1,70,32,642,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -638,0,0,[ -]), -text('black',544,912,'Courier',0,24,2,1,0,1,98,48,643,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'size'"]) -], -617,0,0,[ -]). -poly('black',2,[ - 448,784,480,720],1,1,1,644,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 448,784,480,800],1,1,1,645,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 448,912,480,928],1,1,1,646,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). 
-group([ -group([ -polygon('black',13,[ - 160,1005,160,1043,160,1056,232,1056,664,1056,736,1056,736,1043,736,1005, - 736,992,664,992,232,992,160,992,160,1005],0,1,1,0,654,0,0,0,0,0,'1', - "2490",[ -]), -box('black',178,995,718,1053,0,1,0,655,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',448,994,'Courier',0,17,1,1,0,1,160,16,656,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',448,1016,'Courier',0,17,1,1,0,1,0,16,657,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -653,0,0,[ -]), -group([ -polygon('black',13,[ - 400,1005,400,1043,400,1056,414,1056,498,1056,512,1056,512,1043,512,1005, - 512,992,498,992,414,992,400,992,400,1005],0,1,1,0,659,0,0,0,0,0,'1', - "2490",[ -]), -box('black',404,995,509,1053,0,1,0,660,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',456,994,'Courier',0,17,1,1,0,1,160,16,661,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',456,1016,'Courier',0,17,1,1,0,1,90,16,662,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -658,0,0,[ -]), -group([ -polygon('black',13,[ - 288,1005,288,1043,288,1056,302,1056,386,1056,400,1056,400,1043,400,1005, - 400,992,386,992,302,992,288,992,288,1005],0,1,1,0,664,0,0,0,0,0,'1', - "2490",[ -]), -box('black',292,995,397,1053,0,1,0,665,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',344,994,'Courier',0,17,1,1,0,1,160,16,666,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',344,1016,'Courier',0,17,1,1,0,1,100,16,667,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -663,0,0,[ -]), -group([ -polygon('black',13,[ - 624,1002,624,1046,624,1056,634,1056,725,1056,736,1056,736,1046,736,1002, - 736,992,725,992,634,992,624,992,624,1002],0,1,1,0,669,0,0,0,0,0,'1', - "2490",[ -]), -box('black',627,994,733,1054,0,1,0,670,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',680,994,'Courier',0,17,1,1,0,1,160,16,671,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',680,1008,'Courier',0,17,2,1,0,1,90,32,672,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -668,0,0,[ -]), -group([ -polygon('black',13,[ - 512,1002,512,1046,512,1056,522,1056,613,1056,624,1056,624,1046,624,1002, - 624,992,613,992,522,992,512,992,512,1002],0,1,1,0,674,0,0,0,0,0,'1', - "2490",[ -]), -box('black',515,994,621,1054,0,1,0,675,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',568,994,'Courier',0,17,1,1,0,1,160,16,676,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',568,1008,'Courier',0,17,2,1,0,1,70,32,677,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -673,0,0,[ -]), -text('black',224,1008,'Courier',0,24,2,1,0,1,98,48,678,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'time'"]) -], -652,0,0,[ -]). 
-group([ -group([ -polygon('black',13,[ - 160,1085,160,1123,160,1136,232,1136,664,1136,736,1136,736,1123,736,1085, - 736,1072,664,1072,232,1072,160,1072,160,1085],0,1,1,0,691,0,0,0,0,0,'1', - "2490",[ -]), -box('black',178,1075,718,1133,0,1,0,692,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',448,1074,'Courier',0,17,1,1,0,1,160,16,693,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',448,1096,'Courier',0,17,1,1,0,1,0,16,694,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -690,0,0,[ -]), -group([ -polygon('black',13,[ - 400,1085,400,1123,400,1136,414,1136,498,1136,512,1136,512,1123,512,1085, - 512,1072,498,1072,414,1072,400,1072,400,1085],0,1,1,0,696,0,0,0,0,0,'1', - "2490",[ -]), -box('black',404,1075,509,1133,0,1,0,697,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',456,1074,'Courier',0,17,1,1,0,1,160,16,698,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',456,1096,'Courier',0,17,1,1,0,1,90,16,699,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -695,0,0,[ -]), -group([ -polygon('black',13,[ - 288,1085,288,1123,288,1136,302,1136,386,1136,400,1136,400,1123,400,1085, - 400,1072,386,1072,302,1072,288,1072,288,1085],0,1,1,0,701,0,0,0,0,0,'1', - "2490",[ -]), -box('black',292,1075,397,1133,0,1,0,702,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',344,1074,'Courier',0,17,1,1,0,1,160,16,703,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',344,1096,'Courier',0,17,1,1,0,1,100,16,704,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -700,0,0,[ -]), -group([ -polygon('black',13,[ - 624,1082,624,1126,624,1136,634,1136,725,1136,736,1136,736,1126,736,1082, - 736,1072,725,1072,634,1072,624,1072,624,1082],0,1,1,0,706,0,0,0,0,0,'1', - "2490",[ -]), -box('black',627,1074,733,1134,0,1,0,707,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',680,1074,'Courier',0,17,1,1,0,1,160,16,708,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',680,1088,'Courier',0,17,2,1,0,1,90,32,709,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -705,0,0,[ -]), -group([ -polygon('black',13,[ - 512,1082,512,1126,512,1136,522,1136,613,1136,624,1136,624,1126,624,1082, - 624,1072,613,1072,522,1072,512,1072,512,1082],0,1,1,0,711,0,0,0,0,0,'1', - "2490",[ -]), -box('black',515,1074,621,1134,0,1,0,712,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',568,1074,'Courier',0,17,1,1,0,1,160,16,713,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',568,1088,'Courier',0,17,2,1,0,1,70,32,714,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -710,0,0,[ -]), -text('black',224,1088,'Courier',0,24,2,1,0,1,98,48,715,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'vel'"]) -], -689,0,0,[ -]). 
-group([ -group([ -polygon('black',13,[ - 160,1165,160,1203,160,1216,232,1216,664,1216,736,1216,736,1203,736,1165, - 736,1152,664,1152,232,1152,160,1152,160,1165],0,1,1,0,718,0,0,0,0,0,'1', - "2490",[ -]), -box('black',178,1155,718,1213,0,1,0,719,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',448,1154,'Courier',0,17,1,1,0,1,160,16,720,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "", 1, 0, 0, -text('black',448,1176,'Courier',0,17,1,1,0,1,0,16,721,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - ""])) -]) -], -717,0,0,[ -]), -group([ -polygon('black',13,[ - 400,1165,400,1203,400,1216,414,1216,498,1216,512,1216,512,1203,512,1165, - 512,1152,498,1152,414,1152,400,1152,400,1165],0,1,1,0,723,0,0,0,0,0,'1', - "2490",[ -]), -box('black',404,1155,509,1213,0,1,0,724,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',456,1154,'Courier',0,17,1,1,0,1,160,16,725,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Type", 1, 0, 0, -text('black',456,1176,'Courier',0,17,1,1,0,1,90,16,726,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Type"])) -]) -], -722,0,0,[ -]), -group([ -polygon('black',13,[ - 288,1165,288,1203,288,1216,302,1216,386,1216,400,1216,400,1203,400,1165, - 400,1152,386,1152,302,1152,288,1152,288,1165],0,1,1,0,728,0,0,0,0,0,'1', - "2490",[ -]), -box('black',292,1155,397,1213,0,1,0,729,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',344,1154,'Courier',0,17,1,1,0,1,160,16,730,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Data-Space", 1, 0, 0, -text('black',344,1176,'Courier',0,17,1,1,0,1,100,16,731,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Data-Space"])) -]) -], -727,0,0,[ -]), -group([ -polygon('black',13,[ - 624,1162,624,1206,624,1216,634,1216,725,1216,736,1216,736,1206,736,1162, - 736,1152,725,1152,634,1152,624,1152,624,1162],0,1,1,0,733,0,0,0,0,0,'1', - "2490",[ -]), -box('black',627,1154,733,1214,0,1,0,734,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',680,1154,'Courier',0,17,1,1,0,1,160,16,735,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Optional", 1, 0, 0, -text('black',680,1168,'Courier',0,17,2,1,0,1,90,32,736,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Optional", - "Meta-Data"])) -]) -], -732,0,0,[ -]), -group([ -polygon('black',13,[ - 512,1162,512,1206,512,1216,522,1216,613,1216,624,1216,624,1206,624,1162, - 624,1152,613,1152,522,1152,512,1152,512,1162],0,1,1,0,738,0,0,0,0,0,'1', - "2490",[ -]), -box('black',515,1154,621,1214,0,1,0,739,0,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',568,1154,'Courier',0,17,1,1,0,1,160,16,740,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Science", 1, 0, 0, -text('black',568,1168,'Courier',0,17,2,1,0,1,70,32,741,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Science", - "Data"])) -]) -], -737,0,0,[ -]), -text('black',224,1168,'Courier',0,24,2,1,0,1,98,48,742,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Dataset", - "'size'"]) -], -716,0,0,[ -]). -poly('black',2,[ - 128,1024,160,1024],1,1,1,743,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,1104,160,1104],1,1,1,744,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,1184,160,1184],1,1,1,745,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). 
-group([ -polygon('black',13,[ - 160,1233,160,1295,160,1296,197,1296,347,1296,384,1296,384,1295,384,1233, - 384,1232,347,1232,197,1232,160,1232,160,1233],0,1,1,0,752,2,0,0,0,0,'1', - "2490",[ -]), -box('black',169,1232,375,1296,0,1,0,753,2,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',272,1232,'Courier',0,24,1,1,0,1,224,24,754,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Small-Data Heap", 1, 0, 0, -text('black',272,1256,'Courier',0,17,1,1,0,1,150,16,755,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Small-Data Heap"])) -]) -], -751,0,0,[ -]). -group([ -polygon('black',13,[ - 160,1313,160,1375,160,1376,197,1376,347,1376,384,1376,384,1375,384,1313, - 384,1312,347,1312,197,1312,160,1312,160,1313],0,1,1,0,784,2,0,0,0,0,'1', - "2490",[ -]), -box('black',169,1312,375,1376,0,1,0,785,2,0,0,0,0,'1',[ -attr("", "auto_center_attr", 0, 1, 0, -text('black',272,1312,'Courier',0,24,1,1,0,1,224,24,786,0,19,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "auto_center_attr"])), -attr("label=", "Free-Space List", 1, 0, 0, -text('black',272,1336,'Courier',0,17,1,1,0,1,150,16,787,0,13,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ - "Free-Space List"])) -]) -], -783,0,0,[ -]). -poly('black',2,[ - 128,1264,160,1264],1,1,1,805,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]). -poly('black',2,[ - 128,1344,160,1344],1,1,1,806,0,0,0,0,8,3,0,0,0,'1','8','3', - "0",[ -]).