From 2e8cd59163cda6b051eae53aac58f54e0a9e9351 Mon Sep 17 00:00:00 2001 From: Frank Baker Date: Fri, 25 Aug 2000 12:42:24 -0500 Subject: [svn-r2482] Bringing "HDF5 Technical Notes" into development branch (from R1.2 branch) --- doc/html/TechNotes/BigDataSmMach.html | 122 ++++++++ doc/html/TechNotes/ChStudy_1000x1000.gif | Bin 0 -> 6594 bytes doc/html/TechNotes/ChStudy_250x250.gif | Bin 0 -> 6914 bytes doc/html/TechNotes/ChStudy_499x499.gif | Bin 0 -> 10429 bytes doc/html/TechNotes/ChStudy_5000x1000.gif | Bin 0 -> 10653 bytes doc/html/TechNotes/ChStudy_500x500.gif | Bin 0 -> 6842 bytes doc/html/TechNotes/ChStudy_p1.gif | Bin 0 -> 6550 bytes doc/html/TechNotes/ChStudy_p1.obj | 113 +++++++ doc/html/TechNotes/ChunkingStudy.html | 190 ++++++++++++ doc/html/TechNotes/CodeReview.html | 300 ++++++++++++++++++ doc/html/TechNotes/ExternalFiles.html | 279 +++++++++++++++++ doc/html/TechNotes/FreeLists.html | 205 +++++++++++++ doc/html/TechNotes/H4-H5Compat.html | 271 ++++++++++++++++ doc/html/TechNotes/HeapMgmt.html | 84 +++++ doc/html/TechNotes/IOPipe.html | 114 +++++++ doc/html/TechNotes/LibMaint.html | 128 ++++++++ doc/html/TechNotes/MemoryMgmt.html | 510 +++++++++++++++++++++++++++++++ doc/html/TechNotes/MoveDStruct.html | 66 ++++ doc/html/TechNotes/NamingScheme.html | 300 ++++++++++++++++++ doc/html/TechNotes/ObjectHeader.html | 72 +++++ doc/html/TechNotes/RawDStorage.html | 274 +++++++++++++++++ doc/html/TechNotes/SymbolTables.html | 329 ++++++++++++++++++++ doc/html/TechNotes/Version.html | 137 +++++++++ doc/html/TechNotes/pipe1.gif | Bin 0 -> 10110 bytes doc/html/TechNotes/pipe1.obj | 136 +++++++++ doc/html/TechNotes/pipe2.gif | Bin 0 -> 11715 bytes doc/html/TechNotes/pipe2.obj | 168 ++++++++++ doc/html/TechNotes/pipe3.gif | Bin 0 -> 6961 bytes doc/html/TechNotes/pipe3.obj | 70 +++++ doc/html/TechNotes/pipe4.gif | Bin 0 -> 8355 bytes doc/html/TechNotes/pipe4.obj | 92 ++++++ doc/html/TechNotes/pipe5.gif | Bin 0 -> 6217 bytes doc/html/TechNotes/pipe5.obj | 52 ++++ doc/html/TechNotes/version.gif | Bin 0 -> 4772 bytes doc/html/TechNotes/version.obj | 96 ++++++ 35 files changed, 4108 insertions(+) create mode 100644 doc/html/TechNotes/BigDataSmMach.html create mode 100644 doc/html/TechNotes/ChStudy_1000x1000.gif create mode 100644 doc/html/TechNotes/ChStudy_250x250.gif create mode 100644 doc/html/TechNotes/ChStudy_499x499.gif create mode 100644 doc/html/TechNotes/ChStudy_5000x1000.gif create mode 100644 doc/html/TechNotes/ChStudy_500x500.gif create mode 100644 doc/html/TechNotes/ChStudy_p1.gif create mode 100644 doc/html/TechNotes/ChStudy_p1.obj create mode 100644 doc/html/TechNotes/ChunkingStudy.html create mode 100644 doc/html/TechNotes/CodeReview.html create mode 100644 doc/html/TechNotes/ExternalFiles.html create mode 100644 doc/html/TechNotes/FreeLists.html create mode 100644 doc/html/TechNotes/H4-H5Compat.html create mode 100644 doc/html/TechNotes/HeapMgmt.html create mode 100644 doc/html/TechNotes/IOPipe.html create mode 100644 doc/html/TechNotes/LibMaint.html create mode 100644 doc/html/TechNotes/MemoryMgmt.html create mode 100644 doc/html/TechNotes/MoveDStruct.html create mode 100644 doc/html/TechNotes/NamingScheme.html create mode 100644 doc/html/TechNotes/ObjectHeader.html create mode 100644 doc/html/TechNotes/RawDStorage.html create mode 100644 doc/html/TechNotes/SymbolTables.html create mode 100644 doc/html/TechNotes/Version.html create mode 100644 doc/html/TechNotes/pipe1.gif create mode 100644 doc/html/TechNotes/pipe1.obj create mode 100644 doc/html/TechNotes/pipe2.gif create mode 
100644 doc/html/TechNotes/pipe2.obj create mode 100644 doc/html/TechNotes/pipe3.gif create mode 100644 doc/html/TechNotes/pipe3.obj create mode 100644 doc/html/TechNotes/pipe4.gif create mode 100644 doc/html/TechNotes/pipe4.obj create mode 100644 doc/html/TechNotes/pipe5.gif create mode 100644 doc/html/TechNotes/pipe5.obj create mode 100644 doc/html/TechNotes/version.gif create mode 100644 doc/html/TechNotes/version.obj diff --git a/doc/html/TechNotes/BigDataSmMach.html b/doc/html/TechNotes/BigDataSmMach.html new file mode 100644 index 0000000..fe00ff8 --- /dev/null +++ b/doc/html/TechNotes/BigDataSmMach.html @@ -0,0 +1,122 @@ + + + + Big Datasets on Small Machines + + + +

Big Datasets on Small Machines

+ +

1. Introduction

+ +

The HDF5 library is able to handle files larger than the + maximum file size, and datasets larger than the maximum memory + size. For instance, a machine where sizeof(off_t) + and sizeof(size_t) are both four bytes can handle + datasets and files as large as 18x10^18 bytes. However, most + Unix systems limit the number of concurrently open files, so a + practical file size limit is closer to 512GB or 1TB. + +

Two "tricks" must be imployed on these small systems in order + to store large datasets. The first trick circumvents the + off_t file size limit and the second circumvents + the size_t main memory limit. + +

2. File Size Limits

+ +

Systems that have 64-bit file addresses will be able to access + large files automatically. One should see the following output + from configure: + +

+checking size of off_t... 8
+    
+ +

Also, some 32-bit operating systems have special file systems + that can support large (>2GB) files and HDF5 will detect + these and use them automatically. If this is the case, the + output from configure will show: + +

+checking for lseek64... yes
+checking for fseek64... yes
+    
+ +

Otherwise one must use an HDF5 file family. Such a family is + created by setting file family properties in a file access + property list and then supplying a file name that includes a + printf-style integer format. For instance: + +

+hid_t plist, file;
+plist = H5Pcreate (H5P_FILE_ACCESS);
+H5Pset_family (plist, 1<<30, H5P_DEFAULT);
+file = H5Fcreate ("big%03d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+    
+ +

The second argument (1<<30) to + H5Pset_family() indicates that the family members + are to be 2^30 bytes (1GB) each although we could have used any + reasonably large value. In general, family members cannot be + 2GB or larger because writes to byte number 2,147,483,647 will fail, so + the largest safe size for a family member is 2,147,483,647 bytes. + HDF5 will create family members on demand as the HDF5 address + space increases, but since most Unix systems limit the number of + concurrently open files the effective maximum size of the HDF5 + address space will be limited (the system on which this was + developed allows 1024 open files, so if each family member is + approx 2GB then the largest HDF5 file is approx 2TB). + +
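    Reopening an existing family uses the same kind of file access
    property list; a minimal sketch (error checking omitted):

+plist = H5Pcreate (H5P_FILE_ACCESS);
+H5Pset_family (plist, 1<<30, H5P_DEFAULT);
+file = H5Fopen ("big%03d.h5", H5F_ACC_RDWR, plist);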

If the effective HDF5 address space is limited then one may be + able to store datasets as external datasets each spanning + multiple files of any length since HDF5 opens external dataset + files one at a time. To arrange storage for a 5TB dataset split + among 1GB files one could say: + +

+char name[64];
+int i;
+hid_t plist = H5Pcreate (H5P_DATASET_CREATE);
+for (i=0; i<5*1024; i++) {
+   sprintf (name, "velocity-%04d.raw", i);
+   H5Pset_external (plist, name, 0, (size_t)1<<30);
+}
+    
+ +

3. Dataset Size Limits

+ +

The second limit which must be overcome is that of + sizeof(size_t). HDF5 defines a data type called + hsize_t which is used for sizes of datasets and is, + by default, defined as unsigned long long. + +

To create a dataset with 8*2^30 4-byte integers for a total of + 32GB one first creates the dataspace. We give two examples + here: a 4-dimensional dataset whose dimension sizes are smaller + than the maximum value of a size_t, and a + 1-dimensional dataset whose dimension size is too large to fit + in a size_t. + +

+hsize_t size1[4] = {8, 1024, 1024, 1024};
+hid_t space1 = H5Screate_simple (4, size1, size1);
+
+hsize_t size2[1] = {8589934592LL};
+hid_t space2 = H5Screate_simple (1, size2, size2);
+    
+ +

However, the LL suffix is not portable, so it may + be better to replace the number with + (hsize_t)8*1024*1024*1024. + +
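    For example, the second dataspace above can be written portably as:

+hsize_t size2[1] = {(hsize_t)8*1024*1024*1024};
+hid_t space2 = H5Screate_simple (1, size2, size2);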

For compilers that don't support long long, large + datasets will not be possible. The library performs too much + arithmetic on hsize_t types to make the use of a + struct feasible. + +


+
Robb Matzke
+ + +Last modified: Sun Jul 19 11:37:25 EDT 1998 + + + diff --git a/doc/html/TechNotes/ChStudy_1000x1000.gif b/doc/html/TechNotes/ChStudy_1000x1000.gif new file mode 100644 index 0000000..b7d5a83 Binary files /dev/null and b/doc/html/TechNotes/ChStudy_1000x1000.gif differ diff --git a/doc/html/TechNotes/ChStudy_250x250.gif b/doc/html/TechNotes/ChStudy_250x250.gif new file mode 100644 index 0000000..fe35f39 Binary files /dev/null and b/doc/html/TechNotes/ChStudy_250x250.gif differ diff --git a/doc/html/TechNotes/ChStudy_499x499.gif b/doc/html/TechNotes/ChStudy_499x499.gif new file mode 100644 index 0000000..0d2038b Binary files /dev/null and b/doc/html/TechNotes/ChStudy_499x499.gif differ diff --git a/doc/html/TechNotes/ChStudy_5000x1000.gif b/doc/html/TechNotes/ChStudy_5000x1000.gif new file mode 100644 index 0000000..0f3c290 Binary files /dev/null and b/doc/html/TechNotes/ChStudy_5000x1000.gif differ diff --git a/doc/html/TechNotes/ChStudy_500x500.gif b/doc/html/TechNotes/ChStudy_500x500.gif new file mode 100644 index 0000000..38dd7d6 Binary files /dev/null and b/doc/html/TechNotes/ChStudy_500x500.gif differ diff --git a/doc/html/TechNotes/ChStudy_p1.gif b/doc/html/TechNotes/ChStudy_p1.gif new file mode 100644 index 0000000..938d133 Binary files /dev/null and b/doc/html/TechNotes/ChStudy_p1.gif differ diff --git a/doc/html/TechNotes/ChStudy_p1.obj b/doc/html/TechNotes/ChStudy_p1.obj new file mode 100644 index 0000000..6fbf583 --- /dev/null +++ b/doc/html/TechNotes/ChStudy_p1.obj @@ -0,0 +1,113 @@ +%TGIF 3.0-p5 +state(0,33,100,0,0,0,16,1,9,1,1,0,0,3,7,1,1,'Helvetica',0,24,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). +% +% @(#)$Header$ +% %W% +% +unit("1 pixel/pixel"). +page(1,"",1). +box('black',64,64,384,384,0,1,1,22,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 128,64,128,384],0,1,1,23,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 192,64,192,384],0,1,1,24,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 256,64,256,384],0,1,1,25,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 320,64,320,384],0,1,1,26,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 64,128,384,128],0,1,1,27,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 64,192,384,192],0,1,1,28,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 64,256,384,256],0,1,1,29,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 64,320,384,320],0,1,1,30,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',96,80,'Courier',0,17,1,1,0,1,7,14,37,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1"]). +text('black',160,80,'Courier',0,17,1,1,0,1,7,14,39,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "2"]). +text('black',224,80,'Courier',0,17,1,1,0,1,7,14,41,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "3"]). +text('black',288,80,'Courier',0,17,1,1,0,1,7,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "4"]). +text('black',352,80,'Courier',0,17,1,1,0,1,7,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "5"]). +text('black',96,144,'Courier',0,17,1,1,0,1,7,14,51,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "6"]). +text('black',160,144,'Courier',0,17,1,1,0,1,7,14,53,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "7"]). +text('black',224,144,'Courier',0,17,1,1,0,1,7,14,55,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "8"]). +text('black',288,144,'Courier',0,17,1,1,0,1,7,14,57,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "9"]). +text('black',352,144,'Courier',0,17,1,1,0,1,14,14,59,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "10"]). 
+text('black',96,208,'Courier',0,17,1,1,0,1,14,14,61,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "11"]). +text('black',160,208,'Courier',0,17,1,1,0,1,14,14,63,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "12"]). +text('black',224,208,'Courier',0,17,1,1,0,1,14,14,65,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "13"]). +text('black',288,208,'Courier',0,17,1,1,0,1,14,14,67,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "14"]). +text('black',352,208,'Courier',0,17,1,1,0,1,14,14,71,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "15"]). +text('black',96,272,'Courier',0,17,1,1,0,1,14,14,75,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "16"]). +text('black',160,272,'Courier',0,17,1,1,0,1,14,14,77,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "17"]). +text('black',224,272,'Courier',0,17,1,1,0,1,14,14,79,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "18"]). +text('black',288,272,'Courier',0,17,1,1,0,1,14,14,81,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "19"]). +text('black',352,272,'Courier',0,17,1,1,0,1,14,14,83,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "20"]). +text('black',96,336,'Courier',0,17,1,1,0,1,14,14,87,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "21"]). +text('black',160,336,'Courier',0,17,1,1,0,1,14,14,89,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "22"]). +text('black',224,336,'Courier',0,17,1,1,0,1,14,14,91,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "23"]). +text('black',288,336,'Courier',0,17,1,1,0,1,14,14,93,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "24"]). +text('black',352,336,'Courier',0,17,1,1,0,1,14,14,95,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "25"]). +poly('black',2,[ + 416,64,416,384],3,1,1,100,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 64,416,384,416],3,1,1,101,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',390,228,'Courier',0,17,1,0,0,1,14,35,102,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,1,0,[ + 390,228,390,228,425,242,0,-1000,1000,0,34,18,389,227,426,243],[ + "5,000"]). +text('black',224,432,'Courier',0,17,1,1,0,1,35,14,116,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "5,000"]). +text('black',160,512,'Courier',0,17,1,0,0,1,105,14,131,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "= 1,000 x 1,000"]). +box('black',80,480,144,544,7,1,1,134,0,0,0,0,0,'1',[ +]). +text('black',224,16,'Helvetica',0,24,1,1,0,1,296,29,144,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Order that data was written"]). +box('black',32,0,464,576,0,1,1,149,0,0,0,0,0,'1',[ +]). diff --git a/doc/html/TechNotes/ChunkingStudy.html b/doc/html/TechNotes/ChunkingStudy.html new file mode 100644 index 0000000..776b8fe --- /dev/null +++ b/doc/html/TechNotes/ChunkingStudy.html @@ -0,0 +1,190 @@ + + + + Testing the chunked layout of HDF5 + + + + + +This document is of interest primarily for its discussion of the +HDF team's motivation for implementing raw data caching. +At a more abstract level, the discussion of the principles of +data chunking is also of interest, but a more recent discussion +of that topic can be found in +Dataset Chunking Issues in the +HDF5 User's Guide. + +The performance study described here predates the current chunking +implementation in the HDF5 library, so the particular performance data +is no longer apropos. +     -- the Editor + + +

Testing the chunked layout of HDF5

+ +

These are the results of studying the chunked layout policy in + HDF5. A 1000 by 1000 array of integers was written to a file + dataset, extending the dataset with each write to create, in the + end, a 5000 by 5000 array of 4-byte integers for a total data + storage size of 100 million bytes. + +
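    As background, a minimal sketch of how such an extendible, chunked
    dataset might be created (file creation and error checking omitted;
    the 500x500 chunk dimensions correspond to the first row of Table 1
    below):

+hsize_t cur[2]   = {1000, 1000};
+hsize_t max[2]   = {H5S_UNLIMITED, H5S_UNLIMITED};
+hsize_t chunk[2] = {500, 500};
+hsize_t final[2] = {5000, 5000};
+
+hid_t space = H5Screate_simple (2, cur, max);
+hid_t plist = H5Pcreate (H5P_DATASET_CREATE);
+H5Pset_chunk (plist, 2, chunk);              /* chunked storage layout */
+hid_t dset  = H5Dcreate (file, "array", H5T_NATIVE_INT, space, plist);
+
+/* Each 1000x1000 write is preceded by enlarging the dataset,
+ * up to the final 5000x5000 extent: */
+H5Dextend (dset, final);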

+

+ Order that data was written +
Fig 1: Write-order of Output Blocks +
+ +

After the array was written, it was read back in blocks that + were 500 by 500 elements in row-major order (that is, the top-left + quadrant of output block one, then the top-right quadrant of + output block one, then the top-left quadrant of output block two, + etc.). + +

I tried to answer two questions: how does the total file overhead + (meta data and unused raw data) change as a function of the chunk + size, and what does the pattern of file-level I/O requests look like? +

+ +

I started with chunk sizes that were multiples of the read + block size or k*(500, 500). + +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Table 1: Total File Overhead +
Chunk Size (elements)    Meta Data Overhead (ppm)    Raw Data Overhead (ppm)
500 by 500                        85.84                        0.00
1000 by 1000                      23.08                        0.00
5000 by 1000                      23.08                        0.00
250 by 250                       253.30                        0.00
499 by 499                        85.84                   205164.84
+
+ +
+

+

+ 500x500 +
Fig 2: Chunk size is 500x500 +
+ +

The first half of Figure 2 shows output to the file while the + second half shows input. Each dot represents a file-level I/O + request and the lines that connect the dots are for visual + clarity. The size of the request is not indicated in the + graph. The output block size is four times the chunk size which + results in four file-level write requests per block for a total + of 100 requests. Since file space for the chunks was allocated + in output order, and the input block size is 1/4 the output + block size, the input shows a staircase effect. Each input + request results in one file-level read request. The downward + spike at about the 60-millionth byte is probably the result of a + cache miss for the B-tree and the downward spike at the end is + probably a cache flush or file boot block update. + +


+

+

+ 1000x1000 +
Fig 3: Chunk size is 1000x1000 +
+ +

In this test I increased the chunk size to match the output + block size and one can see from the first half of the graph that + 25 file-level write requests were issued, one for each output + block. The read half of the test shows that four times the + amount of data was read as written. This results from the fact + that HDF5 must read the entire chunk for any request that falls + within that chunk, which is done because (1) if the data is + compressed the entire chunk must be decompressed, and (2) the + library assumes that a chunk size was chosen to optimize disk + performance. + +


+

+

+ 5000x1000 +
Fig 4: Chunk size is 5000x1000 +
+ +

Increasing the chunk size further results in even worse + performance since both the read and write halves of the test are + re-reading and re-writing vast amounts of data. This demonstrates + that chunk sizes should not be much larger + than the typical partial I/O request. + +


+

+

+ 250x250 +
Fig 5: Chunk size is 250x250 +
+ +

If the chunk size is decreased then the amount of data + transferred between the disk and library is optimal for no + caching, but the amount of meta data required to describe the + chunk locations increases to 250 parts per million. One can + also see that the final downward spike contains more file-level + write requests as the meta data is flushed to disk just before + the file is closed. + +


+

+

+ 499x499 +
Fig 6: Chunk size is 499x499 +
+ +

This test shows the result of choosing a chunk size which is + close to the I/O block size. Because the total size of the + array isn't a multiple of the chunk size, the library allocates + an extra zone of chunks around the top and right edges of the + array which are only partially filled. This results in + 20,516,484 extra bytes of storage, a 20% increase in the total + raw data storage size. But the amount of meta data overhead is + the same as for the 500 by 500 test. In addition, the mismatch + causes entire chunks to be read in order to update a few + elements along the edge of the chunk, which results in a 3.6-fold + increase in the amount of data transferred. + +


+
HDF Help Desk
+ + +Last modified: 30 Jan 1998 (technical content) +
+Last modified: 9 May 2000 (editor's note) + + + diff --git a/doc/html/TechNotes/CodeReview.html b/doc/html/TechNotes/CodeReview.html new file mode 100644 index 0000000..213cbbe --- /dev/null +++ b/doc/html/TechNotes/CodeReview.html @@ -0,0 +1,300 @@ + + + + Code Review + + +

Code Review 1

+ +

Some background...

+

This is one of the functions exported from the + H5B.c file that implements a B-link-tree class + without worrying about concurrency yet (thus the `Note:' in the + function prologue). The H5B.c file provides the + basic machinery for operating on generic B-trees, but it isn't + much use by itself. Various subclasses of the B-tree (like + symbol tables or indirect storage) provide their own interface + and back end to this function. For instance, + H5G_stab_find() takes a symbol table OID and a name + and calls H5B_find() with an appropriate + udata argument that eventually gets passed through to the + B-tree subclass callbacks (the cmp3 and + found methods seen below). + +
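    As a hypothetical illustration of that pattern (the real structure
    lives in the H5G package and may differ in its details), the udata
    for a symbol-table lookup might look like:

+typedef struct {
+    const char  *name;    /* IN:  name being looked up         */
+    H5G_entry_t  entry;   /* OUT: symbol table entry, if found */
+} H5G_bt_ud_t;
+
+/* H5G_stab_find() would then call something like: */
+H5B_find (f, H5B_SNODE, addr, &udata);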

+ 1 /*-------------------------------------------------------------------------
+ 2  * Function:    H5B_find
+ 3  *
+ 4  * Purpose:     Locate the specified information in a B-tree and return
+ 5  *              that information by filling in fields of the caller-supplied
+ 6  *              UDATA pointer depending on the type of leaf node
+ 7  *              requested.  The UDATA can point to additional data passed
+ 8  *              to the key comparison function.
+ 9  *
+10  * Note:        This function does not follow the left/right sibling
+11  *              pointers since it assumes that all nodes can be reached
+12  *              from the parent node.
+13  *
+14  * Return:      Success:        SUCCEED if found, values returned through the
+15  *                              UDATA argument.
+16  *
+17  *              Failure:        FAIL if not found, UDATA is undefined.
+18  *
+19  * Programmer:  Robb Matzke
+20  *              matzke@llnl.gov
+21  *              Jun 23 1997
+22  *
+23  * Modifications:
+24  *
+25  *-------------------------------------------------------------------------
+26  */
+27 herr_t
+28 H5B_find (H5F_t *f, const H5B_class_t *type, const haddr_t *addr, void *udata)
+29 {
+30    H5B_t        *bt=NULL;
+31    intn         idx=-1, lt=0, rt, cmp=1;
+32    int          ret_value = FAIL;
+    
+ +

All pointer arguments are initialized when defined. I don't + worry much about non-pointers because it's usually obvious when + the value isn't initialized. + +

+33 
+34    FUNC_ENTER (H5B_find, NULL, FAIL);
+35 
+36    /*
+37     * Check arguments.
+38     */
+39    assert (f);
+40    assert (type);
+41    assert (type->decode);
+42    assert (type->cmp3);
+43    assert (type->found);
+44    assert (addr && H5F_addr_defined (addr));
+    
+ +

I use assert to check invariant conditions. At + this level of the library, none of these assertions should fail + unless something is majorly wrong. The arguments should have + already been checked by higher layers. It also provides + documentation about what arguments might be optional. + +

+45    
+46    /*
+47     * Perform a binary search to locate the child which contains
+48     * the thing for which we're searching.
+49     */
+50    if (NULL==(bt=H5AC_protect (f, H5AC_BT, addr, type, udata))) {
+51       HGOTO_ERROR (H5E_BTREE, H5E_CANTLOAD, FAIL);
+52    }
+    
+ +

You'll see this quite often in the low-level stuff and it's + documented in the H5AC.c file. The + H5AC_protect ensures that the B-tree node (which + inherits from the H5AC package) whose OID is addr + is locked into memory for the duration of this function (see the + H5AC_unprotect on line 90). Most likely, if this + node has been accessed in the not-too-distant past, it will still + be in memory and the H5AC_protect is almost a + no-op. If cache debugging is compiled in, then the protect also + prevents other parts of the library from accessing the node + while this function is protecting it, so this function can allow + the node to be in an inconsistent state while calling other + parts of the library. + +

The alternative is to call the slightly + cheaper + H5AC_find and assume that the pointer it returns is + valid only until some other library function is called, but + since we're accessing the pointer throughout this function, I + chose to use the simpler protect scheme. All protected objects + must be unprotected before the file is closed, thus the + use of HGOTO_ERROR instead of + HRETURN_ERROR. + +

+53    rt = bt->nchildren;
+54 
+55    while (lt<rt && cmp) {
+56       idx = (lt + rt) / 2;
+57       if (H5B_decode_keys (f, bt, idx)<0) {
+58          HGOTO_ERROR (H5E_BTREE, H5E_CANTDECODE, FAIL);
+59       }
+60 
+61       /* compare */
+62       if ((cmp=(type->cmp3)(f, bt->key[idx].nkey, udata,
+63                             bt->key[idx+1].nkey))<0) {
+64          rt = idx;
+65       } else {
+66          lt = idx+1;
+67       }
+68    }
+69    if (cmp) {
+70       HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
+71    }
+    
+ +

Code is arranged in paragraphs with a comment starting each + paragraph. The previous paragraph is a standard binary search + algorithm. The (type->cmp3)() is an indirect + function call into the subclass of the B-tree. All indirect + function calls have the function part in parentheses to document + that it's indirect (quite obvious here, but not so obvious when + the function is a variable). + +

It's also my standard practice to have side effects in + conditional expressions because I can write code faster and it's + more apparent to me what the condition is testing. But if I + have an assignment in a conditional expr, then I use an extra + set of parens even if they're not required (usually they are, as + in this case) so it's clear that I meant = instead + of ==. + +

+72 
+73    /*
+74     * Follow the link to the subtree or to the data node.
+75     */
+76    assert (idx>=0 && idx<bt->nchildren);
+77    if (bt->level > 0) {
+78       if ((ret_value = H5B_find (f, type, bt->child+idx, udata))<0) {
+79          HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
+80       }
+81    } else {
+82       ret_value = (type->found)(f, bt->child+idx, bt->key[idx].nkey,
+83                                 udata, bt->key[idx+1].nkey);
+84       if (ret_value<0) {
+85          HGOTO_ERROR (H5E_BTREE, H5E_NOTFOUND, FAIL);
+86       }
+87    }
+    
+ +

Here I broke the "side effect in conditional" rule, which I + sometimes do if the expression is so long that the + <0 gets lost at the end. Another thing to note is + that success/failure is always determined by comparing with zero + instead of SUCCEED or FAIL. I do this + because occassionally one might want to return other meaningful + values (always non-negative) or distinguish between various types of + failure (always negative). + +

+88 
+89 done:
+90    if (bt && H5AC_unprotect (f, H5AC_BT, addr, bt)<0) {
+91       HRETURN_ERROR (H5E_BTREE, H5E_PROTECT, FAIL);
+92    }
+93    FUNC_LEAVE (ret_value);
+94 }
+    
+ +

For lack of a better way to handle errors during error cleanup, + I just call the HRETURN_ERROR macro even though it + will make the error stack not quite right. I also use short-circuiting + boolean operators instead of nested if + statements since that's standard C practice. + +

Code Review 2

+ + +

The following code is an API function from the H5F package... + +

+ 1 /*--------------------------------------------------------------------------
+ 2  NAME
+ 3     H5Fflush
+ 4 
+ 5  PURPOSE
+ 6     Flush all cached data to disk and optionally invalidates all cached
+ 7     data.
+ 8 
+ 9  USAGE
+10     herr_t H5Fflush(fid, invalidate)
+11         hid_t fid;              IN: File ID of file to close.
+12         hbool_t invalidate;     IN: Invalidate all of the cache?
+13 
+14  ERRORS
+15     ARGS      BADTYPE       Not a file atom. 
+16     ATOM      BADATOM       Can't get file struct. 
+17     CACHE     CANTFLUSH     Flush failed. 
+18 
+19  RETURNS
+20     SUCCEED/FAIL
+21 
+22  DESCRIPTION
+23         This function flushes all cached data to disk and, if INVALIDATE
+24     is non-zero, removes cached objects from the cache so they must be
+25     re-read from the file on the next access to the object.
+26 
+27  MODIFICATIONS:
+28 --------------------------------------------------------------------------*/
+    
+ +

An API prologue is used for each API function instead of my + normal function prologue. I use the prologue from Code Review 1 + for non-API functions because it's more suited to C programmers, + it requires less work to keep it synchronized with the code, and + I have better editing tools for it. + +

+29 herr_t
+30 H5Fflush (hid_t fid, hbool_t invalidate)
+31 {
+32    H5F_t        *file = NULL;
+33 
+34    FUNC_ENTER (H5Fflush, H5F_init_interface, FAIL);
+35    H5ECLEAR;
+    
+ +

API functions are never called internally, therefore I always + clear the error stack before doing anything. + +

+36 
+37    /* check arguments */
+38    if (H5_FILE!=H5Aatom_group (fid)) {
+39       HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL); /*not a file atom*/
+40    }
+41    if (NULL==(file=H5Aatom_object (fid))) {
+42       HRETURN_ERROR (H5E_ATOM, H5E_BADATOM, FAIL); /*can't get file struct*/
+43    }
+    
+ +

If something is wrong with the arguments then we raise an + error. We never assert arguments at this level. + We also convert atoms to pointers since atoms are really just a + pointer-hiding mechanism. Functions that can be called + internally always have pointer arguments instead of atoms + because (1) then they don't have to always convert atoms to + pointers, and (2) the various pointer data types provide more + documentation and type checking than just an hid_t + type. + +

+44 
+45    /* do work */
+46    if (H5F_flush (file, invalidate)<0) {
+47       HRETURN_ERROR (H5E_CACHE, H5E_CANTFLUSH, FAIL); /*flush failed*/
+48    }
+    
+ +

An internal version of the function does the real work. That + internal version calls assert to check/document + its arguments and can be called from other library functions. + +

+49 
+50    FUNC_LEAVE (SUCCEED);
+51 }
+    
+ +
+
Robb Matzke
+ + +Last modified: Mon Nov 10 15:33:33 EST 1997 + + + diff --git a/doc/html/TechNotes/ExternalFiles.html b/doc/html/TechNotes/ExternalFiles.html new file mode 100644 index 0000000..c3197af --- /dev/null +++ b/doc/html/TechNotes/ExternalFiles.html @@ -0,0 +1,279 @@ + + + + External Files in HDF5 + + + +

External Files in HDF5

+ +

Overview of Layers

+ +

This table shows some of the layers of HDF5. Each layer calls + functions at the same or lower layers and never functions at + higher layers. An object identifier (OID) takes various forms + at the various layers: at layer 0 an OID is an absolute physical + file address; at layers 1 and 2 it's an absolute virtual file + address. At layers 3 through 6 it's a relative address, and at + layers 7 and above it's an object handle. + +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Layer-7:  Groups | Datasets
Layer-6:  Indirect Storage | Symbol Tables
Layer-5:  B-trees | Object Hdrs | Heaps
Layer-4:  Caching
Layer-3:  H5F chunk I/O
Layer-2:  H5F low
Layer-1:  File Family | Split Meta/Raw
Layer-0:  Section-2 I/O | Standard I/O | Malloc/Free
+
+ +

Single Address Space

+ +

The simplest form of hdf5 file is a single file containing only + hdf5 data. The file begins with the boot block, which is + followed until the end of the file by hdf5 data. The next most + complicated file allows non-hdf5 data (user defined data or + internal wrappers) to appear before the boot block and after the + end of the hdf5 data. The hdf5 data is treated as a single + linear address space in both cases. + +

The next level of complexity comes when non-hdf5 data is + interspersed with the hdf5 data. We handle that by including + the non-hdf5 interspersed data in the hdf5 address space and + simply not referencing it (eventually we might add those + addresses to a "do-not-disturb" list using the same mechanism as + the hdf5 free list, but it's not absolutely necessary). This is + implemented except for the "do-not-disturb" list. + +

The most complicated single address space hdf5 file is when we + allow the address space to be split among multiple physical + files. For instance, a >2GB file can be split into smaller + chunks and transferred to a 32-bit machine, then accessed as a + single logical hdf5 file. The library already supports >32 bit + addresses, so at layer 1 we split a 64-bit address into a 32-bit + file number and a 32-bit offset (the 64 and 32 are + arbitrary). The rest of the library still operates with a linear + address space. + +
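    A sketch of that layer-1 split (the variable names here are
    illustrative only, not the library's actual code):

+/* addr is the 64-bit linear HDF5 address */
+membno = (addr >> 32) & 0xffffffff;    /* 32-bit file number          */
+offset =  addr        & 0xffffffff;    /* 32-bit offset within member */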

Another variation might be a family of two files where all the + meta data is stored in one file and all the raw data is stored + in another file to allow the HDF5 wrapper to be easily replaced + with some other wrapper. + +

The H5Fcreate and H5Fopen functions + would need to be modified to pass file-type info down to layer 2 + so the correct drivers can be called and parameters passed to + the drivers to initialize them. + +

Implementation

+ +

I've implemented fixed-size family members. The entire hdf5 + file is partitioned into members where each member is the same + size. The family scheme is used if one passes a name to + H5F_open (which is called by H5Fopen() + and H5Fcreate()) that contains a + printf(3c)-style integer format specifier. + Currently, the default low-level file driver is used for all + family members (H5F_LOW_DFLT, usually set to be Section 2 I/O or + Section 3 stdio), but we'll probably eventually want to pass + that as a parameter of the file access property list, which + hasn't been implemented yet. When creating a family, a default + family member size is used (defined at the top of H5Ffamily.c, + currently 64MB) but that also should be settable in the file + access property list. When opening an existing family, the size + of the first member is used to determine the member size + (flushing/closing a family ensures that the first member is the + correct size) but the other family members don't have to be that + large (the local address space, however, is logically the same + size for all members). + +

I haven't implemented a split meta/raw family yet but am rather + curious to see how it would perform. I was planning to use the + `.h5' extension for the meta data file and `.raw' for the raw + data file. The high-order bit in the address would determine + whether the address refers to meta data or raw data. If the user + passes a name that ends with `.raw' to H5F_open + then we'll choose the split family and use the default low level + driver for each of the two family members. Eventually we'll + want to pass these kinds of things through the file access + property list instead of relying on naming convention. + +

External Raw Data

+ +

We also need the ability to point to raw data that isn't in the + HDF5 linear address space. For instance, a dataset might be + striped across several raw data files. + +

Fortunately, the only two packages that need to be aware of + this are the packages for reading/writing contiguous raw data + and discontiguous raw data. Since contiguous raw data is a + special case, I'll discuss how to implement external raw data in + the discontiguous case. + +

Discontiguous data is stored as a B-tree whose keys are the + chunk indices and whose leaf nodes point to the raw data by + storing a file address. So what we need is some way to name the + external files, and a way to efficiently store the external file + name for each chunk. + +

I propose adding to the object header an External File + List message that is a 1-origin array of file names. + Then, in the B-tree, each key has an index into the External + File List (or zero for the HDF5 file) for the file where the + chunk can be found. The external file index is only used at + the leaf nodes to get to the raw data (the entire B-tree is in + the HDF5 file) but because of the way keys are copied among + the B-tree nodes, it's much easier to store the index with + every key. + +
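    A hypothetical shape for such a key (the names are invented for
    illustration):

+typedef struct {
+    hsize_t  offset[H5O_LAYOUT_NDIMS]; /* logical offset of the chunk      */
+    intn     efl_idx;                  /* 1-origin index into the External
+                                        * File List; 0 means the HDF5 file */
+} H5F_istore_key_t;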

Multiple HDF5 Files

+ +

One might also want to combine two or more HDF5 files in a + manner similar to mounting file systems in Unix. That is, the + group structure and meta data from one file appear as though + they exist in the first file. One opens File-A, and then + mounts File-B at some point in File-A, the mount + point, so that traversing into the mount point actually + causes one to enter the root object of File-B. File-A and + File-B are each complete HDF5 files and can be accessed + individually without mounting them. + +

We need a couple additional pieces of machinery to make this + work. First, an haddr_t type (a file address) doesn't contain + any info about which HDF5 file's address space the address + belongs to. But since haddr_t is an opaque type except at + layers 2 and below, it should be quite easy to add a pointer to + the HDF5 file. This would also remove the H5F_t argument from + most of the low-level functions since it would be part of the + OID. + +

The other thing we need is a table of mount points and some + functions that understand them. We would add the following + table to each H5F_t struct: + +

+struct H5F_mount_t {
+   H5F_t *parent;         /* Parent HDF5 file if any */
+   struct {
+      H5F_t *f;           /* File which is mounted */
+      haddr_t where;      /* Address of mount point */
+   } *mount;              /* Array sorted by mount point */
+   intn nmounts;          /* Number of mounted files */
+   intn alloc;            /* Size of mount table */
+};
+    
+ +

The H5Fmount function takes the ID of an open + file or group, the name of a to-be-mounted file, the name of the mount + point, and a file access property list (like H5Fopen). + It opens the new file and adds a record to the parent's mount + table. The H5Funmount function takes the parent + file or group ID and the name of the mount point and disassociates + the mounted file from the mount point. It does not close the + mounted file. The H5Fclose + function closes/unmounts files recursively. + +
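    From that description, the proposed signatures might look like the
    following sketch (the functions as eventually released may differ):

+herr_t H5Fmount (hid_t loc_id, const char *child_file_name,
+                 const char *mount_point, hid_t access_plist);
+herr_t H5Funmount (hid_t loc_id, const char *mount_point);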

The H5G_iname function which translates a name to + a file address (haddr_t) looks at the mount table + at each step in the translation and switches files where + appropriate. All name-to-address translations occur through + this function. + +

How Long?

+ +

I'm expecting to be able to implement the two new flavors of + single linear address space in about two days. It took two hours + to implement the malloc/free file driver at level zero and I + don't expect this to be much more work. + +

I'm expecting three days to implement the external raw data for + discontiguous arrays. Adding the file index to the B-tree is + quite trivial; adding the external file list message shouldn't + be too hard since the object header message class from which this + message derives is fully implemented; and changing + H5F_istore_read should be trivial. Most of the + time will be spent designing a way to cache Unix file + descriptors efficiently since the total number of open files + allowed per process could be much smaller than the total number + of HDF5 files and external raw data files. + +

I'm expecting four days to implement being able to mount one + HDF5 file on another. I was originally planning a lot more, but + making haddr_t opaque turned out to be much easier + than I planned (I did it last Fri). Most of the work will + probably be removing the redundant H5F_t arguments for lots of + functions. + +

Conclusion

+ +

The external raw data could be implemented as a single linear + address space, but doing so would require one to allocate large + enough file addresses throughout the file (>32bits) before the + file was created. It would make mixing an HDF5 file family with + external raw data, or an external HDF5 wrapper around an HDF4 + file, a more difficult process. So I consider the implementation of + external raw data files as a single HDF5 linear address space a + kludge. + +

The ability to mount one HDF5 file on another might not be a + very important feature especially since each HDF5 file must be a + complete file by itself. It's not possible to stripe an array + over multiple HDF5 files because the B-tree wouldn't be complete + in any one file, so the only choice is to stripe the array + across multiple raw data files and store the B-tree in the HDF5 + file. On the other hand, it might be useful if one file + contains some public data which can be mounted by other files + (e.g., a mesh topology shared among collaborators and mounted by + files that contain other fields defined on the mesh). Of course + the applications can open the two files separately, but it might + be more portable if we support it in the library. + +

So we're looking at about two weeks to implement all three + versions. I didn't get a chance to do any of them in AIO + although we had long-term plans for the first two with a + possibility of the third. They'll be much easier to implement in + HDF5 than AIO since I've been keeping these in mind from the + start. + +


+
Robb Matzke
+ + +Last modified: Tue Sep 8 14:43:32 EDT 1998 + + + diff --git a/doc/html/TechNotes/FreeLists.html b/doc/html/TechNotes/FreeLists.html new file mode 100644 index 0000000..1a4b8e8 --- /dev/null +++ b/doc/html/TechNotes/FreeLists.html @@ -0,0 +1,205 @@ + + +Memory Management and Free Lists + + + + +

Memory Management and Free Lists

+ +
+
+At Release 1.2.2, free list management code was introduced to the HDF5 
+library.  This included one user-level function, H5garbage_collect, which 
+garbage collects on all the free-lists.  H5garbage_collect is the only user-
+accessible (i.e., application developer-accessible) element of this 
+functionality.
+
+The free-lists generally reduce the amount of dynamic memory used to around 
+75% of the pre-optimized amount as well as speed up the time in the library 
+code by ~5%.  The free-lists also help linearize the amount of memory used with 
+increasing numbers of datasets or re-writes on the data, so the amount of 
+memory used for the 1500/45 free-list case is only 66% of the memory used for 
+the unoptimized case.
+
+Overall, the introduction of free list management is a win: the library is 
+slightly faster and uses much less system resources than before.  Most of the
+emphasis has been focused on the main "thoroughfares" through the code;
+less attention was paid to the "back streets" which are used much less 
+frequently and offer less potential for abuse.
+
+Adding a free-list for a data structure in the HDF5 library code is easy:
+
+Old code:
+---------
+    int foo(void)
+    {
+        H5W_t *w;
+
+        for(i=0; i<x; i++) {
+            w=H5MM_malloc(sizeof(H5W_t));
+            <use w>
+            H5MM_xfree(w);
+        }
+    }
+
+New code:
+---------
+H5FL_DEFINE(H5W_t);
+
+    int foo(void)
+    {
+        H5W_t *w;
+
+        for(i=0; i<x; i++) {
+            w=H5FL_ALLOC(H5W_t,0);
+            <use w>
+            H5FL_FREE(H5W_t,w);
+        }
+    }
+
+
+There are three kinds of free-lists: 
+   -- for "regular" objects, 
+   -- arrays of fixed-size objects (both fixed length and unknown length), and 
+   -- "blocks" of bytes.  
+ 
+   "Regular" free-lists use the H5FL_<*> macros in H5FLprivate.h and are
+   designed for single, fixed-size data structures like typedef'ed structs,
+   etc.  
+
+   Arrays of objects use the H5FL_ARR_<*> macros and are designed for arrays 
+   (both fixed in length and varying lengths) of fixed length data structures 
+   (like typedef'ed types).  
+
+   "Block" free-lists use the H5FL_BLK_<*> macros and are designed to hold 
+   varying sized buffers of bytes, with no structure.  
+
+   H5S.c contains examples for "regular" and fixed-sized arrays;
+   H5B.c contains examples for variable-sized arrays and "blocks".
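+
+   By analogy with the H5FL_ALLOC example above, a "block" free-list
+   might be used like this (a sketch only; the exact macro arguments
+   should be checked against H5FLprivate.h):
+
+   H5FL_BLK_DEFINE(raw_blk);
+
+       buf = H5FL_BLK_ALLOC(raw_blk,size,0);
+       <use buf>
+       H5FL_BLK_FREE(raw_blk,buf);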
+
+A free-list doesn't have to be used for every data structure allocated and
+freed, just for those which are prone to abuse when multiple operations are
+being performed.  It is important to use the macros for declaring and
+manipulating the free-lists, however; they allow the free'd objects on the 
+lists to be garbage collected by the library at the library's termination 
+or at the user's request.
+
+One public API function has been added: H5garbage_collect, which garbage 
+collects on all the free-lists of all the different types.  It is not
+required to be called; it is useful only when the application has caused
+the library to allocate many objects, then released them, and wants to
+reduce the library's memory use from its peak.  The library 
+automatically garbage collects all the free lists when the application ends.
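+
+For example, an application that has just released a large batch of
+objects might call (a sketch):
+
+    H5garbage_collect();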
+
+Questions should be sent to the HDF Help Desk at hdfhelp@ncsa.uiuc.edu.
+
+
+===========================================
+BENCHMARK INFORMATION
+===========================================
+
+New version with free lists:
+
+Datasets=500, Data Rewrites=15:
+    Peak Heap Usage: 18210820 bytes
+    Time in library code: 2.260 seconds
+    # of malloc calls: 22864
+
+Datasets=1000, Data Rewrites=15:
+    Peak Heap Usage: 31932420 bytes
+    Time in library code: 5.090 seconds
+    # of malloc calls: 43045
+
+Datasets=1500, Data Rewrites=15:
+    Peak Heap Usage: 41566212 bytes
+    Time in library code: 8.623 seconds
+    # of malloc calls: 60623
+
+Datasets=500, Data Rewrites=30:
+    Peak Heap Usage: 19456004 bytes
+    Time in library code: 4.274 seconds
+    # of malloc calls: 23353
+
+Datasets=1000, Data Rewrites=30:
+    Peak Heap Usage: 33988612 bytes
+    Time in library code: 9.955 seconds
+    # of malloc calls: 43855
+
+Datasets=1500, Data Rewrites=30:
+    Peak Heap Usage: 43950084 bytes
+    Time in library code: 17.413 seconds
+    # of malloc calls: 61554
+
+Datasets=500, Data Rewrites=45:
+    Peak Heap Usage: 20717572 bytes
+    Time in library code: 6.326 seconds
+    # of malloc calls: 23848
+
+Datasets=1000, Data Rewrites=45:
+    Peak Heap Usage: 35807236 bytes
+    Time in library code: 15.146 seconds
+    # of malloc calls: 44572
+
+Datasets=1500, Data Rewrites=45:
+    Peak Heap Usage: 46022660 bytes
+    Time in library code: 27.140 seconds
+    # of malloc calls: 62370
+
+
+Older version with no free lists:
+
+Datasets=500, Data Rewrites=15:
+    Peak Heap Usage: 25370628 bytes
+    Time in library code: 2.329 seconds
+    # of malloc calls: 194991
+
+Datasets=1000, Data Rewrites=15:
+    Peak Heap Usage: 39550980 bytes
+    Time in library code: 5.251 seconds
+    # of malloc calls: 417971
+
+Datasets=1500, Data Rewrites=15:
+    Peak Heap Usage: 68870148 bytes
+    Time in library code: 8.913 seconds
+    # of malloc calls: 676564
+
+Datasets=500, Data Rewrites=30:
+    Peak Heap Usage: 31670276 bytes
+    Time in library code: 4.435 seconds
+    # of malloc calls: 370320
+
+Datasets=1000, Data Rewrites=30:
+    Peak Heap Usage: 44646404 bytes
+    Time in library code: 10.325 seconds
+    # of malloc calls: 797125
+
+Datasets=1500, Data Rewrites=30:
+    Peak Heap Usage: 68870148 bytes
+    Time in library code: 18.057 seconds
+    # of malloc calls: 1295336
+
+Datasets=500, Data Rewrites=45:
+    Peak Heap Usage: 33906692 bytes
+    Time in library code: 6.577 seconds
+    # of malloc calls: 545656
+
+Datasets=1000, Data Rewrites=45:
+    Peak Heap Usage: 56778756 bytes
+    Time in library code: 15.720 seconds
+    # of malloc calls: 1176285
+
+Datasets=1500, Data Rewrites=45:
+    Peak Heap Usage: 68870148 bytes
+    Time in library code: 28.138 seconds
+    # of malloc calls: 1914097
+
+
+===========================================
+Last Modified:  3 May 2000
+HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
+
+
+ + diff --git a/doc/html/TechNotes/H4-H5Compat.html b/doc/html/TechNotes/H4-H5Compat.html new file mode 100644 index 0000000..2992476 --- /dev/null +++ b/doc/html/TechNotes/H4-H5Compat.html @@ -0,0 +1,271 @@ + + + + Backward/Forward Compatability + + + +

Backward/Forward Compatibility

+ +

The HDF5 development must proceed in such a manner as to + satisfy the following conditions: + +

    +
  A. HDF5 applications can produce data that HDF5
     applications can read and write, and HDF4 applications can produce
     data that HDF4 applications can read and write.  The situation
     that demands this condition is obvious.

  B. HDF5 applications are able to produce data that HDF4 applications
     can read, and HDF4 applications can subsequently modify the
     file subject to certain constraints depending on the
     implementation.  This condition is for the temporary
     situation where a consumer has neither been relinked with a new
     HDF4 API built on top of the HDF5 API nor recompiled with the
     HDF5 API.

  C. HDF5 applications can read existing HDF4 files and subsequently
     modify the file subject to certain constraints depending on
     the implementation.  This condition is for the temporary
     situation in which the producer has neither been relinked with a
     new HDF4 API built on top of the HDF5 API nor recompiled with
     the HDF5 API, or the permanent situation of HDF5 consumers
     reading archived HDF4 files.

    There's at least one invariant: new object features introduced + in the HDF5 file format (like 2-d arrays of structs) might be + impossible to "translate" to a format that an old HDF4 + application can understand either because the HDF4 file format + or the HDF4 API has no mechanism to describe the object. + +

    What follows is one possible implementation based on how + Condition B was solved in the AIO/PDB world. It also attempts + to satisfy these goals: + +

      +
      1. The main HDF5 library contains as little extra baggage as
         possible by either relying on external programs to take care
         of compatibility issues or by incorporating the logic of such
         programs as optional modules in the HDF5 library.  Conditions B
         and C are separate programs/modules.

      2. No extra baggage not only means the library proper is small,
         but also means it can be implemented (rather than migrated
         from HDF4 source) from the ground up with minimal regard for
         HDF4, thus keeping the logic straightforward.

      3. Compatibility issues are handled behind the scenes when
         necessary (and possible) but can be carried out explicitly
         during things like data migration.
    + +
    +

    Wrappers

    + +

    The proposed implementation uses wrappers to handle + compatibility issues. A Format-X file is wrapped in a + Format-Y file by creating a Format-Y skeleton that replicates + the Format-X meta data. The Format-Y skeleton points to the raw + data stored in Format-X without moving the raw data. The + restriction is that raw data storage methods in Format-Y are a + superset of raw data storage methods in Format-X (otherwise the + raw data must be copied to Format-Y). We're assuming that meta + data is small relative to the entire file. + +

    The wrapper can be a separate file that has pointers into the + first file or it can be contained within the first file. If + contained in a single file, the file can appear as a Format-Y + file or simultaneously a Format-Y and Format-X file. + +

    The Format-X meta-data can be thought of as the original + wrapper around raw data and Format-Y is a second wrapper around + the same data. The wrappers are independent of one another; + modifying the meta-data in one wrapper causes the other to + become out of date. Modification of raw data doesn't invalidate + either view as long as the meta data that describes its storage + isn't modified. For instance, an array element can change values + if storage is already allocated for the element, but if storage + isn't allocated then the meta data describing the storage must + change, invalidating all wrappers but one. + +

    It's perfectly legal to modify the meta data of one wrapper + without modifying the meta data in the other wrapper(s). The + illegal part is accessing the raw data through a wrapper which + is out of date. + +

    If raw data is wrapped by more than one internal wrapper + (internal means that the wrapper is in the same file as + the raw data) then access to that file must assume that + unreferenced parts of that file contain meta data for another + wrapper and cannot be reclaimed as free memory. + +


    +

    Implementation of Condition B

    + +

    Since this is a temporary situation which can't be + automatically detected by the HDF5 library, we must rely + on the application to notify the HDF5 library whether or not it + must satisfy Condition B. (Even if we don't rely on the + application, at some point someone is going to remove the + Condition B constraint from the library.) So the module that + handles Condition B is conditionally compiled and then enabled + on a per-file basis. + +

    If the application desires to produce an HDF4 file (determined + by arguments to H5Fopen), and the Condition B + module is compiled into the library, then H5Fclose + calls the module to traverse the HDF5 wrapper and generate an + additional internal or external HDF4 wrapper (wrapper specifics + are described below). If Condition B is implemented as a module + then it can benefit from the metadata already cached by the main + library. + +

    An internal HDF4 wrapper would be used if the HDF5 file is + writable and the user doesn't mind that the HDF5 file is + modified. An external wrapper would be used if the file isn't + writable or if the user wants the data file to be primarily HDF5 + but a few applications need an HDF4 view of the data. + +

    Modifying through the HDF5 library an HDF5 file that has an + internal HDF4 wrapper should invalidate the HDF4 wrapper (and + optionally regenerate it when H5Fclose is + called). The HDF5 library must understand how wrappers work, but + not necessarily anything about the HDF4 file format. + 

    Modifying through the HDF5 library an HDF5 file that has an + external HDF4 wrapper will cause the HDF4 wrapper to become out + of date (but possibly regenerated during H5Fclose). + Note: Perhaps the next release of the HDF4 library should + ensure that the HDF4 wrapper file has a more recent modification + time than the raw data file (the HDF5 file) to which it + points(?) + 

    Modifying through the HDF4 library an HDF5 file that has an + internal or external HDF4 wrapper will cause the HDF5 wrapper to + become out of date. However, there is no way for the old HDF4 + library to notify the HDF5 wrapper that it's out of date. + Therefore the HDF5 library must be able to detect when the HDF5 + wrapper is out of date and be able to fix it. If the HDF4 + wrapper is complete then the easy way is to ignore the original + HDF5 wrapper and generate a new one from the HDF4 wrapper. The + other approach is to compare the HDF4 and HDF5 wrappers and + assume that if they differ HDF4 is the right one, if HDF4 omits + data then it was because HDF4 is a partial wrapper (rather than + assume HDF4 deleted the data), and if HDF4 has new data then + copy the new meta data to the HDF5 wrapper. On the other hand, + perhaps we don't need to allow these situations (modifying an + HDF5 file with the old HDF4 library and then accessing it with + the HDF5 library is either disallowed or causes HDF5 objects + that can't be described by HDF4 to be lost). + 

    To convert an HDF5 file to an HDF4 file on demand, one simply + opens the file with the HDF4 flag and closes it. This is also + how AIO implemented backward compatibility with PDB in its file + format. + 
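    In code, that on-demand conversion might look like the following
    sketch, where the H5F_ACC_HDF4 flag is hypothetical (this note does
    not name an actual flag):

+hid_t file = H5Fopen ("data.h5", H5F_ACC_RDWR | H5F_ACC_HDF4, H5P_DEFAULT);
+H5Fclose (file);    /* closing regenerates the HDF4 wrapper */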


    +

    Implementation of Condition C

    + +

    This condition must be satisfied for all time because there + will always be archived HDF4 files. If a pure HDF4 file (that + is, one without HDF5 meta data) is opened with an HDF5 library, + the H5Fopen builds an internal or external HDF5 + wrapper and then accesses the raw data through that wrapper. If + the HDF5 library modifies the file then the HDF4 wrapper becomes + out of date. However, since the HDF5 library hasn't been + released, we can at least implement it to disable and/or reclaim + the HDF4 wrapper. + +

    If an external and temporary HDF5 wrapper is desired, the + wrapper is created through the cache like all other HDF5 files. + The data appears on disk only if a particular cached datum is + preempted. Instead of calling H5Fclose on the HDF5 + wrapper file we call H5Fabort which immediately + releases all file resources without updating the file, and then + we unlink the file from Unix. + +


    +

    What do wrappers look like?

    + +

    External wrappers are quite obvious: they contain only things + from the format specs for the wrapper and nothing from the + format specs of the format which they wrap. + +

    An internal HDF4 wrapper is added to an HDF5 file in such a way + that the file appears to be both an HDF4 file and an HDF5 + file. HDF4 requires an HDF4 file header at file offset zero. If + a user block is present then we just move the user block down a + bit (and truncate it) and insert the minimum HDF4 signature. + The HDF4 dd list and any other data it needs are + appended to the end of the file and the HDF5 signature uses the + logical file length field to determine the beginning of the + trailing part of the wrapper. + +

    +

    + + + + + + + + + + + + + +
    HDF4 minimal file header. Its main job is to point to + the dd list at the end of the file.
    User-defined block which is truncated by the size of the + HDF4 file header so that the HDF5 boot block file address + doesn't change.
    The HDF5 boot block and data, unmodified by adding the + HDF4 wrapper.
    The main part of the HDF4 wrapper. The dd + list will have entries for all parts of the file so + hdpack(?) doesn't (re)move anything.
    +
    + +

    When such a file is opened by the HDF5 library for
    modification it shifts the user block back down to address zero,
    fills the vacated space with zeros, then truncates the file at
    the end of the HDF5 data or adds the trailing HDF4 wrapper to
    the free list. This prevents HDF4 applications from reading the
    file with an out-of-date wrapper.


    If there is no user block then we have a problem. The HDF5 + boot block must be moved to make room for the HDF4 file header. + But moving just the boot block causes problems because all file + addresses stored in the file are relative to the boot block + address. The only option is to shift the entire file contents + by 512 bytes to open up a user block (too bad we don't have + hooks into the Unix i-node stuff so we could shift the entire + file contents by the size of a file system page without ever + performing I/O on the file :-) + +

    Is it possible to place an HDF5 wrapper in an HDF4 file? I + don't know enough about the HDF4 format, but I would suspect it + might be possible to open a hole at file address 512 (and + possibly before) by moving some things to the end of the file + to make room for the HDF5 signature. The remainder of the HDF5 + wrapper goes at the end of the file and entries are added to the + HDF4 dd list to mark the location(s) of the HDF5 + wrapper. + +


    +

    Other Thoughts

    + +

    Conversion programs that copy an entire HDF4 file to a separate, + self-contained HDF5 file and vice versa might be useful. + + + + +


    +
    Robb Matzke
    + + +Last modified: Wed Oct 8 12:34:42 EST 1997 + + + diff --git a/doc/html/TechNotes/HeapMgmt.html b/doc/html/TechNotes/HeapMgmt.html new file mode 100644 index 0000000..e9e8db4 --- /dev/null +++ b/doc/html/TechNotes/HeapMgmt.html @@ -0,0 +1,84 @@ + + + +

    Heap Management in HDF5

    + +
    +
    +Heap functions are in the H5H package.
    +
    +
    +off_t
    +H5H_new (hdf5_file_t *f, size_t size_hint, size_t realloc_hint);
    +
    +	Creates a new heap in the specified file which can efficiently
    +	store at least SIZE_HINT bytes.  The heap can store more than
    +	that, but doing so may cause the heap to become less efficient
    +	(for instance, a heap implemented as a B-tree might become
+	discontiguous).  The REALLOC_HINT is the minimum number of bytes
    +	by which the heap will grow when it must be resized. The hints
    +	may be zero in which case reasonable (but probably not
    +	optimal) values will be chosen.
    +
    +	The return value is the address of the new heap relative to
    +	the beginning of the file boot block.
    +
    +off_t
    +H5H_insert (hdf5_file_t *f, off_t addr, size_t size, const void *buf);
    +
    +	Copies SIZE bytes of data from BUF into the heap whose address
    +	is ADDR in file F.  BUF must be the _entire_ heap object.  The
    +	return value is the byte offset of the new data in the heap.
    +
    +void *
    +H5H_read (hdf5_file_t *f, off_t addr, off_t offset, size_t size, void *buf);
    +
    +	Copies SIZE bytes of data from the heap whose address is ADDR
    +	in file F into BUF and then returns the address of BUF.  If
    +	BUF is the null pointer then a new buffer will be malloc'd by
    +	this function and its address is returned.
    +
    +	Returns buffer address or null.
    +
    +const void *
    +H5H_peek (hdf5_file_t *f, off_t addr, off_t offset)
    +
    +	A more efficient version of H5H_read that returns a pointer
    +	directly into the cache; the data is not copied from the cache
    +	to a buffer.  The pointer is valid until the next call to an
    +	H5AC function directly or indirectly.
    +
    +	Returns a pointer or null.  Do not free the pointer.
    +
    +void *
    +H5H_write (hdf5_file_t *f, off_t addr, off_t offset, size_t size,
    +           const void *buf);
    +
    +	Modifies (part of) an object in the heap at address ADDR of
    +	file F by copying SIZE bytes from the beginning of BUF to the
+	file.  OFFSET is the address within the heap where the output
    +	is to occur.
    +
    +	This function can fail if the combination of OFFSET and SIZE
    +	would write over a boundary between two heap objects.
    +
    +herr_t
    +H5H_remove (hdf5_file_t *f, off_t addr, off_t offset, size_t size);
    +
    +	Removes an object or part of an object which begins at byte
    +	OFFSET within a heap whose address is ADDR in file F.  SIZE
    +	bytes are returned to the free list.  Removing the middle of
    +	an object has the side effect that one object is now split
    +	into two objects.
    +
    +	Returns success or failure.
    +
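+A minimal usage sketch (not part of the library): it assumes an
+already-open file F, omits error checks, and uses a hypothetical
+object name.
+
+	#include <string.h>	/* strlen */
+	#include <stdlib.h>	/* free   */
+
+	static void
+	heap_example (hdf5_file_t *f)
+	{
+	    const char *name = "dataset1";	/* hypothetical object */
+	    const char *p;
+	    char       *copy;
+	    off_t       heap, offset;
+
+	    /* Create a heap sized for a few small objects; zero hints
+	     * let the library pick reasonable values. */
+	    heap = H5H_new (f, 256, 0);
+
+	    /* Insert the entire object (NUL included), remembering its
+	     * byte offset within the heap. */
+	    offset = H5H_insert (f, heap, strlen(name)+1, name);
+
+	    /* Read it back into a buffer malloc'd by the library... */
+	    copy = H5H_read (f, heap, offset, strlen(name)+1, NULL);
+	    free (copy);
+
+	    /* ...or peek at it in the cache: do not free P, and do not
+	     * use it after the next direct or indirect H5AC call. */
+	    p = H5H_peek (f, heap, offset);
+
+	    /* Return the object's bytes to the heap free list. */
+	    H5H_remove (f, heap, offset, strlen(name)+1);
+	}
+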
    +===========================================
    +Last Modified:  8 July 1998 (technical content)
    +Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
    +HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
    +
    +
    + + + diff --git a/doc/html/TechNotes/IOPipe.html b/doc/html/TechNotes/IOPipe.html new file mode 100644 index 0000000..7c24e2c --- /dev/null +++ b/doc/html/TechNotes/IOPipe.html @@ -0,0 +1,114 @@ + + + + The Raw Data I/O Pipeline + + + +

    The Raw Data I/O Pipeline

    + +

    The HDF5 raw data pipeline is a complicated beast that handles + all aspects of raw data storage and transfer of that data + between the file and the application. Data can be stored + contiguously (internal or external), in variable size external + segments, or regularly chunked; it can be sparse, extendible, + and/or compressible. Data transfers must be able to convert from + one data space to another, convert from one number type to + another, and perform partial I/O operations. Furthermore, + applications will expect their common usage of the pipeline to + perform well. + +

    To accomplish these goals, the pipeline has been designed in a + modular way so no single subroutine is overly complicated and so + functionality can be inserted easily at the appropriate + locations in the pipeline. A general pipeline was developed and + then certain paths through the pipeline were optimized for + performance. + +

    We describe only the file-to-memory side of the pipeline since + the memory-to-file side is a mirror image. We also assume that a + proper hyperslab of a simple data space is being read from the + file into a proper hyperslab of a simple data space in memory, + and that the data type is a compound type which may require + various number conversions on its members. + + Figure 1 + +

    The diagrams should be read from the top down. Line A
    in the figure above shows that H5Dread() copies
    data from a hyperslab of a file dataset to a hyperslab of an
    application buffer by calling H5D_read(). And
    H5D_read() calls, in a loop,
    H5S_simp_fgath(), H5T_conv_struct(),
    and H5S_simp_mscat(). A temporary buffer, TCONV, is
    loaded with data points from the file, then data type conversion
    is performed on the temporary buffer, and finally data points
    are scattered out to application memory. Thus, data type
    conversion is an in-place operation and data space conversion
    consists of two steps. An additional temporary buffer, BKG, is
    large enough to hold N instances of the destination
    data type, where N is the number of data points that
    the TCONV buffer can hold (the TCONV buffer is large enough to
    hold either source or destination data points).

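    In rough outline, the loop inside H5D_read() behaves
    like the following C sketch. This illustrates only the control
    flow: gather_from_file, convert_in_place, and scatter_to_mem are
    stand-ins for the three functions named above, whose real
    signatures also take file, data space, and data type arguments.

        #include <stddef.h>

        #define MIN(a,b) ((a) < (b) ? (a) : (b))

        /* Stand-ins for H5S_simp_fgath(), H5T_conv_struct(), and
         * H5S_simp_mscat() respectively. */
        static void gather_from_file (size_t n, void *tconv);
        static void convert_in_place (size_t n, void *tconv, void *bkg);
        static void scatter_to_mem   (size_t n, const void *tconv);

        /* Control flow of the H5D_read() loop from Figure 1: gather
         * into TCONV, convert in place, scatter to application
         * memory. */
        static void
        read_loop (size_t npoints, size_t tconv_capacity,
                   void *tconv_buf, void *bkg_buf)
        {
            while (npoints > 0) {
                size_t n = MIN (npoints, tconv_capacity);
                gather_from_file (n, tconv_buf);
                convert_in_place (n, tconv_buf, bkg_buf);
                scatter_to_mem   (n, tconv_buf);
                npoints -= n;
            }
        }
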

    The application sets an upper limit for the size of the TCONV + buffer and optionally supplies a buffer. If no buffer is + supplied then one will be created by calling + malloc() when the pipeline is executed (when + necessary) and freed when the pipeline exits. The size of the + BKG buffer depends on the size of the TCONV buffer and if the + application supplies a BKG buffer it should be at least as large + as the TCONV buffer. The default size for these buffers is one + megabyte but the buffer might not be used to full capacity if + the buffer size is not an integer multiple of the source or + destination data point size (whichever is larger, but only + destination for the BKG buffer). + + + +
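    In the public API this limit is set through the dataset transfer
    property list. A hedged sketch (the one-megabyte value mirrors
    the default mentioned above; passing NULL lets the library
    malloc the buffers itself, and dset, mem_type, mem_space,
    file_space, and buf are assumed to exist):

        hid_t xfer = H5Pcreate (H5P_DATASET_XFER);
        H5Pset_buffer (xfer, 1024*1024, NULL, NULL); /* TCONV and BKG */
        H5Dread (dset, mem_type, mem_space, file_space, xfer, buf);
        H5Pclose (xfer);
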

    Occasionally the destination data points will be partially
    initialized and the H5Dread() operation should not
    clobber those values. For instance, the destination type might
    be a struct with members a and b where
    a is already initialized and we're reading
    b from the file. An extra line, G, is added to the
    pipeline to provide the type conversion functions with the
    existing data.

    Figure 2


    It will most likely be quite common that no data type + conversion is necessary. In such cases a temporary buffer for + data type conversion is not needed and data space conversion + can happen in a single step. In fact, when the source and + destination data are both contiguous (they aren't in the + picture) the loop degenerates to a single iteration. + + + Figure 3 + +

    So far we've looked only at internal contiguous storage, but by + replacing Line B in Figures 1 and 2 and Line A in Figure 3 with + Figure 4 the pipeline is able to handle regularly chunked + objects. Line B of Figure 4 is executed once for each chunk + which contains data to be read and the chunk address is found by + looking at a multi-dimensional key in a chunk B-tree which has + one entry per chunk. + + Figure 4 + +

    If a single chunk is requested and the destination buffer is + the same size/shape as the chunk, then the CHUNK buffer is + bypassed and the destination buffer is used instead as shown in + Figure 5. + + Figure 5 + +


    +
    Robb Matzke
    + + +Last modified: Wed Mar 18 10:38:30 EST 1998 + + + diff --git a/doc/html/TechNotes/LibMaint.html b/doc/html/TechNotes/LibMaint.html new file mode 100644 index 0000000..718f085 --- /dev/null +++ b/doc/html/TechNotes/LibMaint.html @@ -0,0 +1,128 @@ + + + + +

    Information for HDF5 Maintainers

    + +
    +
+* You can run make from any directory.  However, a make run in a
+  subdirectory only knows how to build things in that directory and
+  below.  Even so, all makefiles know when their target depends on
+  something outside the local directory tree:
    +
    +	$ cd test
    +	$ make
    +	make: *** No rule to make target ../src/libhdf5.a
    +
    +* All Makefiles understand the following targets:
    +
    +        all              -- build locally.
    +        install          -- install libs, headers, progs.
    +        uninstall        -- remove installed files.
    +        mostlyclean      -- remove temp files (eg, *.o but not *.a).
    +        clean            -- mostlyclean plus libs and progs.
+        distclean        -- remove all non-distributed files.
+        maintainer-clean -- remove all derived files except
+                            H5config.h.in and configure.
    +
    +* Most Makefiles also understand:
    +
    +	TAGS		-- build a tags table
    +	dep, depend	-- recalculate source dependencies
    +	lib		-- build just the libraries w/o programs
    +
    +* If you have personal preferences for which make, compiler, compiler
    +  flags, preprocessor flags, etc., that you use and you don't want to
    +  set environment variables, then use a site configuration file.
    +
    +  When configure starts, it looks in the config directory for files
    +  whose name is some combination of the CPU name, vendor, and
    +  operating system in this order:
    +
    +	CPU-VENDOR-OS
    +	VENDOR-OS
    +	CPU-VENDOR
    +	OS
    +	VENDOR
    +	CPU
    +
    +  The first file which is found is sourced and can therefore affect
    +  the behavior of the rest of configure. See config/BlankForm for the
    +  template.
    +
    +* If you use GNU make along with gcc the Makefile will contain targets
    +  that automatically maintain a list of source interdependencies; you
    +  seldom have to say `make clean'.  I say `seldom' because if you
    +  change how one `*.h' file includes other `*.h' files you'll have
    +  to force an update.
    +
    +  To force an update of all dependency information remove the
    +  `.depend' file from each directory and type `make'.  For
    +  instance:
    +
    +	$ cd $HDF5_HOME
    +	$ find . -name .depend -exec rm {} \;
    +	$ make
    +
    +  If you're not using GNU make and gcc then dependencies come from
    +  ".distdep" files in each directory.  Those files are generated on
+  GNU systems and inserted into the Makefiles by running
    +  config.status (which happens near the end of configure).
    +
    +* If you use GNU make along with gcc then the Perl script `trace' is
    +  run just before dependencies are calculated to update any H5TRACE()
    +  calls that might appear in the file.  Otherwise, after changing the
    +  type of a function (return type or argument types) one should run
    +  `trace' manually on those source files (e.g., ../bin/trace *.c).
    +
    +* Object files stay in the directory and are added to the library as a
    +  final step instead of placing the file in the library immediately
    +  and removing it from the directory.  The reason is three-fold:
    +
    +	1.  Most versions of make don't allow `$(LIB)($(SRC:.c=.o))'
    +	    which makes it necessary to have two lists of files, one
    +	    that ends with `.c' and the other that has the library
    +	    name wrapped around each `.o' file.
    +
    +	2.  Some versions of make/ar have problems with modification
    +	    times of archive members.
    +
    +	3.  Adding object files immediately causes problems on SMP
    +	    machines where make is doing more than one thing at a
    +	    time.
    +
    +* When using GNU make on an SMP you can cause it to compile more than
    +  one thing at a time.  At the top of the source tree invoke make as
    +
    +	$ make -j -l6
    +
    +  which causes make to fork as many children as possible as long as
    +  the load average doesn't go above 6.  In subdirectories one can say
    +
    +	$ make -j2
    +
    +  which limits the number of children to two (this doesn't work at the
    +  top level because the `-j2' is not passed to recursive makes).
    +
    +* To create a release tarball go to the top-level directory and run
    +  ./bin/release.  You can optionally supply one or more of the words
    +  `tar', `gzip', `bzip2' or `compress' on the command line.  The
    +  result will be a (compressed) tar file(s) in the `releases'
    +  directory.  The README file is updated to contain the release date
    +  and version number.
    +
    +* To create a tarball of all the files which are part of HDF5 go to
    +  the top-level directory and type:
    +
    +      tar cvf foo.tar `grep '^\.' MANIFEST |unexpand |cut -f1`
    +
    +
    +===========================================
    +Last Modified:  15 October 1999 (technical content)
    +Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
    +HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
    +
    +
    + + + diff --git a/doc/html/TechNotes/MemoryMgmt.html b/doc/html/TechNotes/MemoryMgmt.html new file mode 100644 index 0000000..93782b5 --- /dev/null +++ b/doc/html/TechNotes/MemoryMgmt.html @@ -0,0 +1,510 @@ + + + + Memory Management in HDF5 + + + +

    Memory Management in HDF5

    + + +

    Is a Memory Manager Necessary?

    + +

    Some form of memory management may be necessary in HDF5 when + the various deletion operators are implemented so that the + file memory is not permanently orphaned. However, since an + HDF5 file was designed with persistent data in mind, the + importance of a memory manager is questionable. + +

    On the other hand, when certain meta data containers (file glue) + grow, they may need to be relocated in order to keep the + container contiguous. + +

    + Example: An object header consists of up to two + chunks of contiguous memory. The first chunk is a fixed + size at a fixed location when the header link count is + greater than one. Thus, inserting additional items into an + object header may require the second chunk to expand. When + this occurs, the second chunk may need to move to another + location in the file, freeing the file memory which that + chunk originally occupied. +
    + +

    The relocation of meta data containers could potentially + orphan a significant amount of file memory if the application + has made poor estimates for preallocation sizes. + + +

    Levels of Memory Management

    + +

    Memory management by the library can be independent of memory + management support by the file format. The file format can + support no memory management, some memory management, or full + memory management. Similarly with the library. + +

    Support in the Library

    + +
    +
    No Support: I +
    When memory is deallocated it simply becomes unreferenced + (orphaned) in the file. Memory allocation requests are + satisfied by extending the file. + +
    A separate off-line utility can be used to detect the + unreferenced bytes of a file and "bubble" them up to the end + of the file and then truncate the file. + +
    Some Support: II +
    The library could support partial memory management all + the time, or full memory management some of the time. + Orphaning free blocks instead of adding them to a free list + should not affect the file integrity, nor should fulfilling + new requests by extending the file instead of using the free + list. + +
    Full Support: III +
    The library supports space-efficient memory management by + always fulfilling allocation requests from the free list when + possible, and by coalescing adjacent free blocks into a + single larger free block. +
    + +

    Support in the File Format

    + +
    +
    No Support: A +
    The file format does not support memory management; any + unreferenced block in the file is assumed to be free. If + the library supports full memory management then it will + have to traverse the entire file to determine which blocks + are unreferenced. + +
    Some Support: B +
    Assuming that unreferenced blocks are free can be + dangerous in a situation where the file is not consistent. + For instance, if a directory tree becomes detached from the + main directory hierarchy, then the detached directory and + everything that is referenced only through the detached + directory become unreferenced. File repair utilities will + be unable to determine which unreferenced blocks need to be + linked back into the file hierarchy. + +
    Therefore, it might be useful to keep an unsorted, + doubly-linked list of free blocks in the file. The library + can add and remove blocks from the list in constant time, + and can generate its own internal free-block data structure + in time proportional to the number of free blocks instead of + the size of the file. Additionally, a library can use a + subset of the free blocks, an alternative which is not + feasible if the file format doesn't support any form of + memory management. + +
    Full Support: C +
    The file format can mirror library data structures for + space-efficient memory management. The free blocks are + linked in unsorted, doubly-linked lists with one list per + free block size. The heads of the lists are pointed to by a + B-tree whose nodes are sorted by free block size. At the + same time, all free blocks are the leaf nodes of another + B-tree sorted by starting and ending address. When the + trees are used in combination we can deallocate and allocate + memory in O(log N) time where N is the + number of free blocks. +
    + +

    Combinations of Library and File Format Support

    + +

    We now evaluate each combination of library support with file + support: + +

    +
    I-A +
    If neither the library nor the file support memory + management, then each allocation request will come from the + end of the file and each deallocation request is a no-op + that simply leaves the free block unreferenced. + +
      +
    • Advantages +
        +
      • No file overhead for allocation or deallocation. +
      • No library overhead for allocation or + deallocation. +
      • No file traversal required at time of open. +
      • No data needs to be written back to the file when + it's closed. +
      • Trivial to implement (already implemented). +
      + +
    • Disadvantages +
        +
      • Inefficient use of file space. +
      • A file repair utility must reclaim lost file space. +
      • Difficulties for file repair utilities. (Is an + unreferenced block a free block or orphaned data?) +
      +
    + +
    II-A +
    In order for the library to support memory management, it + will be required to build the internal free block + representation by traversing the entire file looking for + unreferenced blocks. + +
      +
    • Advantages +
        +
      • No file overhead for allocation or deallocation. +
      • Variable amount of library overhead for allocation + and deallocation depending on how much work the + library wants to do. +
      • No data needs to be written back to the file when + it's closed. +
      • Might use file space efficiently. +
      +
    • Disadvantages +
        +
      • Might use file space inefficiently. +
      • File traversal required at time of open. +
      • A file repair utility must reclaim lost file space. +
      • Difficulties for file repair utilities. +
      • Sharing of the free list between processes falls + outside the HDF5 file format documentation. +
      +
    + +
    III-A +
    In order for the library to support full memory + management, it will be required to build the internal free + block representation by traversing the entire file looking + for unreferenced blocks. + +
      +
    • Advantages +
        +
      • No file overhead for allocation or deallocation. +
      • Efficient use of file space. +
      • No data needs to be written back to the file when + it's closed. +
      +
    • Disadvantages +
        +
      • Moderate amount of library overhead for allocation + and deallocation. +
      • File traversal required at time of open. +
      • A file repair utility must reclaim lost file space. +
      • Difficulties for file repair utilities. +
      • Sharing of the free list between processes falls + outside the HDF5 file format documentation. +
      +
    + +
    I-B +
    If the library doesn't support memory management but the + file format supports some level of management, then a file + repair utility will have to be run occasionally to reclaim + unreferenced blocks. + +
      +
    • Advantages +
        +
      • No file overhead for allocation or deallocation. +
      • No library overhead for allocation or + deallocation. +
      • No file traversal required at time of open. +
      • No data needs to be written back to the file when + it's closed. +
      +
    • Disadvantages +
        +
      • A file repair utility must reclaim lost file space. +
      • Difficulties for file repair utilities. +
      +
    + +
    II-B +
    Both the library and the file format support some level + of memory management. + +
      +
    • Advantages +
        +
      • Constant file overhead per allocation or + deallocation. +
      • Variable library overhead per allocation or + deallocation depending on how much work the library + wants to do. +
      • Traversal at file open time is on the order of the + free list size instead of the file size. +
      • The library has the option of reading only part of + the free list. +
      • No data needs to be written at file close time if + it has been amortized into the cost of allocation + and deallocation. +
• File repair utilities don't have to be run to
	  reclaim memory.
      • File repair utilities can detect whether an + unreferenced block is a free block or orphaned data. +
      • Sharing of the free list between processes might + be easier. +
      • Possible efficient use of file space. +
      +
    • Disadvantages +
        +
      • Possible inefficient use of file space. +
      +
    + +
    III-B +
    The library provides space-efficient memory management but + the file format only supports an unsorted list of free + blocks. + +
      +
    • Advantages +
        +
      • Constant time file overhead per allocation or + deallocation. +
      • No data needs to be written at file close time if + it has been amortized into the cost of allocation + and deallocation. +
      • File repair utilities don't have to be run to + reclaim memory. +
      • File repair utilities can detect whether an + unreferenced block is a free block or orphaned data. +
      • Sharing of the free list between processes might + be easier. +
      • Efficient use of file space. +
      +
    • Disadvantages +
        +
      • O(log N) library overhead per allocation or + deallocation where N is the total number of + free blocks. +
      • O(N) time to open a file since the entire + free list must be read to construct the in-core + trees used by the library. +
      • Library is more complicated. +
      +
    + +
    I-C +
    This has the same advantages and disadvantages as I-A with
      the added disadvantage that the file format is much more
      complicated.

    II-C +
    If the library only provides partial memory management but + the file requires full memory management, then this method + degenerates to the same as II-A with the added disadvantage + that the file format is much more complicated. + +
    III-C +
    The library and file format both provide complete data + structures for space-efficient memory management. + +
      +
    • Advantages +
        +
• Files can be opened in constant time since the
	  free list is read on demand and amortized into the
	  allocation and deallocation requests.
      • No data needs to be written back to the file when + it's closed. +
      • File repair utilities don't have to be run to + reclaim memory. +
      • File repair utilities can detect whether an + unreferenced block is a free block or orphaned data. +
      • Sharing the free list between processes is easy. +
      • Efficient use of file space. +
      +
    • Disadvantages +
        +
      • O(log N) file allocation and deallocation + cost where N is the total number of free + blocks. +
      • O(log N) library allocation and + deallocation cost. +
      • Much more complicated file format. +
      • More complicated library. +
      +
    + +
    + + +

    The Algorithm for II-B

    + +

    The file contains an unsorted, doubly-linked list of free + blocks. The address of the head of the list appears in the + boot block. Each free block contains the following fields: + +

    + + + + + + + + + + + + + + + + + + + + + +
    bytebytebytebyte
    Free Block Signature
    Total Free Block Size
    Address of Left Sibling
    Address of Right Sibling


    Remainder of Free Block


    +
    + +
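    Read as a C struct, the fields in the table above might look
    like the following sketch (field widths and the struct name are
    illustrative; haddr_t is used for file addresses as elsewhere in
    these notes):

        typedef struct free_block_t {
            uint8_t  signature[4];   /* free block signature          */
            uint32_t size;           /* total free block size (bytes) */
            haddr_t  left;           /* address of left sibling       */
            haddr_t  right;          /* address of right sibling      */
            /* remainder of the free block is unused space */
        } free_block_t;
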

    The library reads as much of the free list as convenient when + convenient and pushes those entries onto stacks. This can + occur when a file is opened or any time during the life of the + file. There is one stack for each free block size and the + stacks are sorted by size in a balanced tree in memory. + +

    Deallocation involves finding the correct stack or creating + a new one (an O(log K) operation where K is + the number of stacks), pushing the free block info onto the + stack (a constant-time operation), and inserting the free + block into the file free block list (a constant-time operation + which doesn't necessarily involve any I/O since the free blocks + can be cached like other objects). No attempt is made to + coalesce adjacent free blocks into larger blocks. + +

    Allocation involves finding the correct stack (an O(log + K) operation), removing the top item from the stack + (a constant-time operation), and removing the block from the + file free block list (a constant-time operation). If there is + no free block of the requested size or larger, then the file + is extended. + +

    To make the free list shareable between processes,
    the last step of an allocation will check for the free block
    signature and, if it doesn't find one, will repeat the process.
    Alternatively, a process can temporarily remove free blocks
    from the file and hold them in its own private pool.

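    A minimal sketch of the in-core half of this scheme, under
    assumed names (the O(log K) balanced-tree lookup that locates
    the right stack for a given block size is elided):

        #include <stdlib.h>

        typedef struct free_stack_t {
            size_t  size;           /* every block on this stack has this size */
            size_t  nused, nalloc;  /* stack occupancy                         */
            off_t  *addr;           /* free block addresses, LIFO              */
        } free_stack_t;

        /* Deallocation path: push one free block address. */
        static void
        stack_push (free_stack_t *s, off_t addr)
        {
            if (s->nused >= s->nalloc) {
                s->nalloc = s->nalloc ? 2 * s->nalloc : 16;
                s->addr = realloc (s->addr, s->nalloc * sizeof (off_t));
            }
            s->addr[s->nused++] = addr;
        }

        /* Allocation path: pop an address, or return -1 so the
         * caller extends the file instead. */
        static off_t
        stack_pop (free_stack_t *s)
        {
            return s->nused ? s->addr[--s->nused] : (off_t) -1;
        }
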

    To summarize... +

    +
    File opening +
    O(N) amortized over the time the file is open, + where N is the number of free blocks. The library + can still function without reading any of the file free + block list. + +
    Deallocation +
    O(log K) where K is the number of unique + sizes of free blocks. File access is constant. + +
    Allocation +
    O(log K). File access is constant. + +
    File closing +
    O(1) even if the library temporarily removes free + blocks from the file to hold them in a private pool since + the pool can still be a linked list on disk. +
    + + +

    The Algorithm for III-C

    + +

    The HDF5 file format supports a general B-tree mechanism + for storing data with keys. If we use a B-tree to represent + all parts of the file that are free and the B-tree is indexed + so that a free file chunk can be found if we know the starting + or ending address, then we can efficiently determine whether a + free chunk begins or ends at the specified address. Call this + the Address B-Tree. + +

    If a second B-tree points to a set of stacks where the + members of a particular stack are all free chunks of the same + size, and the tree is indexed by chunk size, then we can + efficiently find the best-fit chunk size for a memory request. + Call this the Size B-Tree. + +

    All free blocks of a particular size can be linked together + with an unsorted, doubly-linked, circular list and the left + and right sibling addresses can be stored within the free + chunk, allowing us to remove or insert items from the list in + constant time. + +

    Deallocation of a block of file memory consists of the
    following steps (a pseudocode sketch follows the list):


      +
    1. Add the new free block whose address is ADDR to the + address B-tree. + +
        +
      1. If the address B-tree contains an entry for a free + block that ends at ADDR-1 then remove that + block from the B-tree and from the linked list (if the + block was the first on the list then the size B-tree + must be updated). Adjust the size and address of the + block being freed to include the block just removed from + the free list. The time required to search for and + possibly remove the left block is O(log N) + where N is the number of free blocks. + +
      2. If the address B-tree contains an entry for the free + block that begins at ADDR+LENGTH then + remove that block from the B-tree and from the linked + list (if the block was the first on the list then the + size B-tree must be updated). Adjust the size of the + block being freed to include the block just removed from + the free list. The time required to search for and + possibly remove the right block is O(log N). + +
      3. Add the new (adjusted) block to the address B-tree. + The time for this operation is O(log N). +
      + +
    2. Add the new block to the size B-tree and linked list. + +
        +
      1. If the size B-tree has an entry for this particular + size, then add the chunk to the tail of the list. This + is an O(log K) operation where K is + the number of unique free block sizes. + +
      2. Otherwise make a new entry in the B-tree for chunks of + this size. This is also O(log K). +
      +
    + +
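    The procedure above as hedged C-like pseudocode; every
    *_btree_* and *_list_* call is a stand-in for the operations
    described, not a real H5B function, and block_t is assumed:

        void
        free_file_block (haddr_t addr, size_t length)
        {
            block_t *b;

            /* 1a. Coalesce left: a free block ending at ADDR-1.  O(log N) */
            if ((b = addr_btree_remove_ending_at (addr - 1)) != NULL) {
                addr    = b->addr;
                length += b->length;
                size_list_unlink (b);   /* may update the size B-tree */
            }
            /* 1b. Coalesce right: a free block starting at ADDR+LENGTH. */
            if ((b = addr_btree_remove_starting_at (addr + length)) != NULL) {
                length += b->length;
                size_list_unlink (b);
            }
            /* 1c. Add the (adjusted) block to the address B-tree.  O(log N) */
            addr_btree_insert (addr, length);

            /* 2. File the block under its size.  O(log K) */
            size_btree_append (length, addr);
        }
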

    Allocation is similar to deallocation. + +

    To summarize... + +

    +
    File opening +
    O(1) + +
    Deallocation +
    O(log N) where N is the total number of + free blocks. File access time is O(log N). + +
    Allocation +
    O(log N). File access time is O(log N). + +
    File closing +
    O(1). +
    + + +
    +
    Robb Matzke
    + + +Last modified: Thu Jul 31 14:41:01 EST + + + diff --git a/doc/html/TechNotes/MoveDStruct.html b/doc/html/TechNotes/MoveDStruct.html new file mode 100644 index 0000000..4576bd2 --- /dev/null +++ b/doc/html/TechNotes/MoveDStruct.html @@ -0,0 +1,66 @@ + + + + Relocating a File Data Structure + + + +

    Relocating a File Data Structure

    + +

    Since file data structures can be cached in memory by the H5AC + package it becomes problematic to move such a data structure in + the file. One cannot just copy a portion of the file from one + location to another because: + +

      +
1. the file might not contain the latest information, and
2. the H5AC package might not realize that the object's
   address has changed and attempt to write the object to disk
   at the old address.
    + +

    Here's a correct method to move data from one location to + another. The example code assumes that one is moving a B-link + tree node from old_addr to new_addr. + +

      +
1. Make sure the disk is up-to-date with respect to the
   cache. There is no need to remove the item from the cache,
   hence the final argument to H5AC_flush is
   FALSE.

      H5AC_flush (f, H5AC_BT, old_addr, FALSE);

2. Read the data from the old address and write it to the new
   address.

      H5F_block_read (f, old_addr, size, buf);
      H5F_block_write (f, new_addr, size, buf);

3. Notify the cache that the address of the object changed.

      H5AC_rename (f, H5AC_BT, old_addr, new_addr);
    + + + +
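    Putting the three steps together, a relocation helper might look
    like this sketch (the address and size types are assumptions;
    error checking is omitted and BUF sizing is up to the caller):

        /* Sketch: relocate a cached B-link tree node from OLD_ADDR
         * to NEW_ADDR using the three steps above. */
        static herr_t
        move_btree_node (hdf5_file_t *f, haddr_t old_addr,
                         haddr_t new_addr, size_t size, void *buf)
        {
            /* 1. Flush without evicting so disk matches the cache. */
            H5AC_flush (f, H5AC_BT, old_addr, FALSE);

            /* 2. Copy the raw bytes to their new home. */
            H5F_block_read  (f, old_addr, size, buf);
            H5F_block_write (f, new_addr, size, buf);

            /* 3. Tell the cache that the object moved. */
            H5AC_rename (f, H5AC_BT, old_addr, new_addr);
            return SUCCEED;
        }
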
    +
    Robb Matzke
    + + +Last modified: Mon Jul 14 15:38:29 EST + + + diff --git a/doc/html/TechNotes/NamingScheme.html b/doc/html/TechNotes/NamingScheme.html new file mode 100644 index 0000000..dbf55bf --- /dev/null +++ b/doc/html/TechNotes/NamingScheme.html @@ -0,0 +1,300 @@ + + + HDF5 Naming Scheme + + + + + +

    +
    HDF5 Naming Scheme for

    + +

    +

    +

    +

    + Authors: + Quincey Koziol and + + Robb Matzke + +
    + +

    +

    +
    + This file /hdf3/web/hdf/internal/HDF_standard/HDF5.coding_standard.html is + maintained by Elena Pourmal + epourmal@ncsa.uiuc.edu . +
    +

    +

    + Last modified August 5, 1997 +
    + +
    + + + diff --git a/doc/html/TechNotes/ObjectHeader.html b/doc/html/TechNotes/ObjectHeader.html new file mode 100644 index 0000000..1335d23 --- /dev/null +++ b/doc/html/TechNotes/ObjectHeader.html @@ -0,0 +1,72 @@ + + + +

    Object Headers

    + +
    +
    +haddr_t
    +H5O_new (hdf5_file_t *f, intn nrefs, size_t size_hint)
    +
    +	Creates a new empty object header and returns its address.
    +	The SIZE_HINT is the initial size of the data portion of the
    +	object header and NREFS is the number of symbol table entries
    +	that reference this object header (normally one).
    +
    +	If SIZE_HINT is too small, then at least some default amount
    +	of space is allocated for the object header.
    +
    +intn				        /*num remaining links		*/
    +H5O_link (hdf5_file_t *f,		/*file containing header	*/
    +	  haddr_t addr,			/*header file address		*/
    +	  intn adjust)			/*link adjustment amount	*/
    +
    +
    +size_t
    +H5O_sizeof (hdf5_file_t *f,		/*file containing header	*/
    +	    haddr_t addr,		/*header file address		*/
    +            H5O_class_t *type,		/*message type or H5O_ANY	*/
    +	    intn sequence)		/*sequence number, usually zero	*/
    +		
    +	Returns the size of a particular instance of a message in an
    +	object header.  When an object header has more than one
    +	instance of a particular message type, then SEQUENCE indicates
    +	which instance to return.
    +
    +void *
    +H5O_read (hdf5_file_t *f,		/*file containing header	*/
    +	  haddr_t addr,			/*header file address		*/
    +	  H5G_entry_t *ent,		/*optional symbol table entry	*/
    +	  H5O_class_t *type,		/*message type or H5O_ANY	*/
    +	  intn sequence,		/*sequence number, usually zero	*/
    +	  size_t size,			/*size of output message	*/
    +	  void *mesg)			/*output buffer			*/
    +
    +	Reads a message from the object header into memory.
    +
    +const void *
    +H5O_peek (hdf5_file_t *f,		/*file containing header	*/
    +          haddr_t addr,			/*header file address		*/
    +	  H5G_entry_t *ent,		/*optional symbol table entry	*/
    +	  H5O_class_t *type,		/*type of message or H5O_ANY	*/
    +	  intn sequence)		/*sequence number, usually zero	*/
    +
    +haddr_t					/*new heap address		*/
    +H5O_modify (hdf5_file_t *f,		/*file containing header	*/
    +            haddr_t addr,		/*header file address		*/
    +	    H5G_entry_t *ent,		/*optional symbol table entry	*/
    +	    hbool_t *ent_modified,	/*entry modification flag	*/
    +	    H5O_class_t *type,		/*message type			*/
    +	    intn overwrite,		/*sequence number or -1		*/
    +	    void *mesg)			/*the message			*/  
    +	  
    +
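+A usage sketch (not library code): H5O_NAME stands in for a
+pointer to a real message class, F is an open file, and error
+checks are omitted.
+
+	haddr_t  hdr;
+	hbool_t  ent_mod;
+	char     in[32] = "example", out[32];
+
+	hdr = H5O_new (f, 1, 64);	/* one hard link, 64-byte hint */
+
+	/* Append an instance of the message; -1 for OVERWRITE
+	 * presumably requests a new sequence number rather than
+	 * replacing an existing instance. */
+	H5O_modify (f, hdr, NULL, &ent_mod, H5O_NAME, -1, in);
+
+	/* Read instance 0 of that message type back into OUT. */
+	H5O_read (f, hdr, NULL, H5O_NAME, 0, sizeof out, out);
+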
    +===========================================
    +Last Modified:  8 July 1998 (technical content)
    +Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
    +HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
    +
    +
    + + + diff --git a/doc/html/TechNotes/RawDStorage.html b/doc/html/TechNotes/RawDStorage.html new file mode 100644 index 0000000..87ea54d --- /dev/null +++ b/doc/html/TechNotes/RawDStorage.html @@ -0,0 +1,274 @@ + + + + Raw Data Storage in HDF5 + + + +

    Raw Data Storage in HDF5

    + +

    This document describes the various ways that raw data is + stored in an HDF5 file and the object header messages which + contain the parameters for the storage. + +

    Raw data storage has three components: the mapping from some + logical multi-dimensional element space to the linear address + space of a file, compression of the raw data on disk, and + striping of raw data across multiple files. These components + are orthogonal. + +

    Some goals of the storage mechanism are to be able to
    efficiently store data which is:


    +
    Small +
    Small pieces of raw data can be treated as meta data and + stored in the object header. This will be achieved by storing + the raw data in the object header with message 0x0006. + Compression and striping are not supported in this case. + +
    Complete Large +
    The library should be able to store large arrays + contiguously in the file provided the user knows the final + array size a priori. The array can then be read/written in a + single I/O request. This is accomplished by describing the + storage with object header message 0x0005. Compression and + striping are not supported in this case. + +
    Sparse Large +
    A large sparse raw data array should be stored in a manner + that is space-efficient but one in which any element can still + be accessed in a reasonable amount of time. Implementation + details are below. + +
    Dynamic Size +
    One often doesn't have prior knowledge of the size of an + array. It would be nice to allow arrays to grow dynamically in + any dimension. It might also be nice to allow the array to + grow in the negative dimension directions if convenient to + implement. Implementation details are below. + +
    Subslab Access +
    Some multi-dimensional arrays are almost always accessed by + subslabs. For instance, a 2-d array of pixels might always be + accessed as smaller 1k-by-1k 2-d arrays always aligned on 1k + index values. We should be able to store the array in such a + way that striding though the entire array is not necessary. + Subslab access might also be useful with compression + algorithms where each storage slab can be compressed + independently of the others. Implementation details are below. + +
    Compressed +
    Various compression algorithms can be applied to the entire + array. We're not planning to support separate algorithms (or a + single algorithm with separate parameters) for each chunk + although it would be possible to implement that in a manner + similar to the way striping across files is + implemented. + +
    Striped Across Files +
    The array access functions should support arrays stored + discontiguously across a set of files. +
    + +

    Implementation of Indexed Storage

    + +

    The Sparse Large, Dynamic Size, and Subslab Access methods + share so much code that they can be described with a single + message. The new Indexed Storage Message (0x0008) + will replace the old Chunked Object (0x0009) and + Sparse Object (0x000A) Messages. + +

    +

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + The Format of the Indexed Storage Message +
    bytebytebytebyte

    Address of B-tree

    Number of DimensionsReservedReservedReserved
    Reserved (4 bytes)
    Alignment for Dimension 0 (4 bytes)
    Alignment for Dimension 1 (4 bytes)
    ...
    Alignment for Dimension N (4 bytes)
    +
    + +

    The alignment fields indicate the alignment in logical space to + use when allocating new storage areas on disk. For instance, + writing every other element of a 100-element one-dimensional + array (using one HDF5 I/O partial write operation per element) + that has unit storage alignment would result in 50 + single-element, discontiguous storage segments. However, using + an alignment of 25 would result in only four discontiguous + segments. The size of the message varies with the number of + dimensions. + +

    A B-tree is used to point to the discontiguous portions of + storage which has been allocated for the object. All keys of a + particular B-tree are the same size and are a function of the + number of dimensions. It is therefore not possible to change the + dimensionality of an indexed storage array after its B-tree is + created. + +

    +

    + + + + + + + + + + + + + + + + + + + + + + + + +
    + The Format of a B-Tree Key +
    bytebytebytebyte
    External File Number or Zero (4 bytes)
    Chunk Offset in Dimension 0 (4 bytes)
    Chunk Offset in Dimension 1 (4 bytes)
    ...
    Chunk Offset in Dimension N (4 bytes)
    +
    + +

    The keys within a B-tree obey an ordering based on the chunk
    offsets: if the offsets in dimension-0 are equal, then
    dimension-1 is used, etc. The External File Number field
    contains a 1-origin offset into the External File List message,
    which contains the name of the external file in which that chunk
    is stored.

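    The ordering rule amounts to a lexicographic comparison of the
    chunk offsets. A sketch, with the struct mirroring the key
    layout above (the struct name, field widths, and NDIMS are
    illustrative only):

        #include <stdint.h>

        #define NDIMS 4                /* illustrative */

        typedef struct istore_key_t {
            uint32_t file_number;      /* external file number or zero */
            uint32_t offset[NDIMS];    /* chunk offset per dimension   */
        } istore_key_t;

        /* Compare keys dimension by dimension, lowest dimension
         * first, as the ordering rule above describes. */
        static int
        istore_cmp (const istore_key_t *a, const istore_key_t *b, int ndims)
        {
            int i;
            for (i = 0; i < ndims; i++) {
                if (a->offset[i] < b->offset[i]) return -1;
                if (a->offset[i] > b->offset[i]) return  1;
            }
            return 0;   /* equal chunk offsets */
        }
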

    Implementation of Striping

    + +

    The indexed storage will support arbitrary striping at the + chunk level; each chunk can be stored in any file. This is + accomplished by using the External File Number field of an + indexed storage B-tree key as a 1-origin offset into an External + File List Message (0x0009) which takes the form: + +

    +

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + The Format of the External File List Message +
    bytebytebytebyte

    Name Heap Address

    Number of Slots Allocated (4 bytes)
    Number of File Names (4 bytes)
    Byte Offset of Name 1 in Heap (4 bytes)
    Byte Offset of Name 2 in Heap (4 bytes)
    ...

    Unused Slot(s)

    +
    + +

    Each indexed storage array that has all or part of its data
    stored in external files will contain a single external file
    list message. The size of the message is determined when the
    message is created, but it may be possible to enlarge the
    message on demand by moving it. At this time, it's not possible
    for multiple arrays to share a single external file list
    message.


    +
    + H5O_efl_t *H5O_efl_new (H5G_entry_t *object, intn + nslots_hint, intn heap_size_hint) + +
    Adds a new, empty external file list message to an object + header and returns a pointer to that message. The message + acts as a cache for file descriptors of external files that + are open. + +

    + intn H5O_efl_index (H5O_efl_t *efl, const char *filename) + +
    Gets the external file index number for a particular file name. + If the name isn't in the external file list then it's added to + the H5O_efl_t struct and immediately written to the object + header to which the external file list message belongs. Name + comparison is textual. Each name should be relative to the + directory which contains the HDF5 file. + +

    + H5F_low_t *H5O_efl_open (H5O_efl_t *efl, intn index, uintn mode) + +
    Gets a low-level file descriptor for an external file. The + external file list caches file descriptors because we might + have many more external files than there are file descriptors + available to this process. The caller should not close this file. + +

    + herr_t H5O_efl_release (H5O_efl_t *efl) + +
    Releases an external file list, closes all files + associated with that list, and if the list has been modified + since the call to H5O_efl_new flushes the message + to disk. +
    + +
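    Taken together, a hedged sketch of the calling sequence for
    these functions (error handling omitted; object and mode are
    assumed to exist, and the file name is illustrative):

        H5O_efl_t  *efl;
        intn        idx;
        H5F_low_t  *lf;

        efl = H5O_efl_new (object, 8, 512);          /* 8 slots, 512-byte heap */
        idx = H5O_efl_index (efl, "chunk0001.raw");  /* add or look up a name  */
        lf  = H5O_efl_open (efl, idx, mode);         /* cached descriptor; the */
                                                     /* caller must not close  */
        /* ... low-level I/O on lf ... */
        H5O_efl_release (efl);                       /* closes files, flushes  */
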
    +
    Robb Matzke
    + + +Last modified: Tue Nov 25 12:36:50 EST 1997 + + + diff --git a/doc/html/TechNotes/SymbolTables.html b/doc/html/TechNotes/SymbolTables.html new file mode 100644 index 0000000..05ee538 --- /dev/null +++ b/doc/html/TechNotes/SymbolTables.html @@ -0,0 +1,329 @@ + + + +

    Symbol Table Caching Issues

    + +
    +
    +A number of issues involving caching of object header messages in
    +symbol table entries must be resolved.
    +
    +What is the motivation for these changes?
    +
    +   If we make objects completely independent of object name it allows
    +   us to refer to one object by multiple names (a concept called hard
    +   links in Unix file systems), which in turn provides an easy way to
    +   share data between datasets.
    +
    +   Every object in an HDF5 file has a unique, constant object header
    +   address which serves as a handle (or OID) for the object.  The
    +   object header contains messages which describe the object.
    +
    +   HDF5 allows some of the object header messages to be cached in
    +   symbol table entries so that the object header doesn't have to be
    +   read from disk.  For instance, an entry for a directory caches the
    +   directory disk addresses required to access that directory, so the
    +   object header for that directory is seldom read.
    +
    +   If an object has multiple names (that is, a link count greater than
    +   one), then it has multiple symbol table entries which point to it.
    +   All symbol table entries must agree on header messages.  The
    +   current mechanism is to turn off the caching of header messages in
    +   symbol table entries when the header link count is more than one,
    +   and to allow caching once the link count returns to one.
    +
    +   However, in the current implementation, a package is allowed to
    +   copy a symbol table entry and use it as a private cache for the
    +   object header.  This doesn't work for a number of reasons (all but
    +   one require a `delete symbol entry' operation).
    +
    +      1. If two packages hold copies of the same symbol table entry,
    +         they don't notify each other of changes to the symbol table
    +         entry. Eventually, one package reads a cached message and
    +         gets the wrong value because the other package changed the
    +         message in the object header.
    +
    +      2. If one package holds a copy of the symbol table entry and
    +         some other part of HDF5 removes the object and replaces it
    +         with some other object, then the original package will
    +         continue to access the non-existent object using the new
    +         object header.
    +
    +      3. If one package holds a copy of the symbol table entry and
    +         some other part of HDF5 (re)moves the directory which
    +         contains the object, then the package will be unable to
    +         update the symbol table entry with the new cached
    +         data. Packages that refer to the object by the new name will
    +         use old cached data.
    +
    +
    +The basic problem is that there may be multiple copies of the object
    +symbol table entry floating around in the code when there should
    +really be at most one per hard link.
    +
    +   Level 0: A copy may exist on disk as part of a symbol table node, which
    +            is a small 1d array of symbol table entries.
    +
    +   Level 1: A copy may be cached in memory as part of a symbol table node
    +	    in the H5Gnode.c file by the H5AC layer.
    +
    +   Level 2a: Another package may be holding a copy so it can perform
    +   	     fast lookup of any header messages that might be cached in
    +   	     the symbol table entry.  It can't point directly to the
+   	     cached symbol table node because that node can disappear
    +   	     at any time.
    +
    +   Level 2b: Packages may hold more than one copy of a symbol table
    +             entry.  For instance, if H5D_open() is called twice for
    +             the same name, then two copies of the symbol table entry
    +             for the dataset exist in the H5D package.
    +
    +How can level 2a and 2b be combined?
    +
    +   If package data structures contained pointers to symbol table
    +   entries instead of copies of symbol table entries and if H5G
    +   allocated one symbol table entry per hard link, then it's trivial
    +   for Level 2a and 2b to benefit from one another's actions since
    +   they share the same cache.
    +
    +How does this work conceptually?
    +
    +   Level 2a and 2b must notify Level 1 of their intent to use (or stop
    +   using) a symbol table entry to access an object header.  The
    +   notification of the intent to access an object header is called
    +   `opening' the object and releasing the access is `closing' the
    +   object.
    +
    +   Opening an object requires an object name which is used to locate
    +   the symbol table entry to use for caching of object header
    +   messages.  The return value is a handle for the object.  Figure 1
    +   shows the state after Dataset1 opens Object with a name that maps
    +   through Entry1.  The open request created a copy of Entry1 called
    +   Shadow1 which exists even if SymNode1 is preempted from the H5AC
    +   layer.
    +
    +                                                     ______
    +                                            Object  /      \
    +	     SymNode1                     +--------+        |
    +	    +--------+            _____\  | Header |        |
    +	    |        |           /     /  +--------+        |
    +	    +--------+ +---------+                  \______/
    +	    | Entry1 | | Shadow1 | /____
    +	    +--------+ +---------+ \    \
    +	    :        :                   \
    +	    +--------+                    +----------+
    +					  | Dataset1 |
    +					  +----------+
    +			     FIGURE 1
    +
    +
    +
    +  The SymNode1 can appear and disappear from the H5AC layer at any
    +  time without affecting the Object Header data cached in the Shadow.
    +  The rules are:
    +
    +  * If the SymNode1 is present and is about to disappear and the
    +    Shadow1 dirty bit is set, then Shadow1 is copied over Entry1, the
    +    Entry1 dirty bit is set, and the Shadow1 dirty bit is cleared.
    +
    +  * If something requests a copy of Entry1 (for a read-only peek
    +    request), and Shadow1 exists, then a copy (not pointer) of Shadow1
    +    is returned instead.
    +
    +  * Entry1 cannot be deleted while Shadow1 exists.
    +
    +  * Entry1 cannot change directly if Shadow1 exists since this means
    +    that some other package has opened the object and may be modifying
    +    it.  I haven't decided if it's useful to ever change Entry1
    +    directly (except of course within the H5G layer itself).
    +
    +  * Shadow1 is created when Dataset1 `opens' the object through
    +    Entry1. Dataset1 is given a pointer to Shadow1 and Shadow1's
    +    reference count is incremented.
    +
    +  * When Dataset1 `closes' the Object the Shadow1 reference count is
    +    decremented.  When the reference count reaches zero, if the
    +    Shadow1 dirty bit is set, then Shadow1's contents are copied to
    +    Entry1, and the Entry1 dirty bit is set. Shadow1 is then deleted
    +    if its reference count is zero.  This may require reading SymNode1
    +    back into the H5AC layer.
    +
    +What happens when another Dataset opens the Object through Entry1?
    +
    +  If the current state is represented by the top part of Figure 2,
    +  then Dataset2 will be given a pointer to Shadow1 and the Shadow1
    +  reference count will be incremented to two.  The Object header link
    +  count remains at one so Object Header messages continue to be cached
+  by Shadow1. Dataset1 and Dataset2 benefit from one another's
    +  actions. The resulting state is represented by Figure 2.
    +
    +                                                     _____
    +             SymNode1                       Object  /     \
    +            +--------+            _____\  +--------+       |
    +            |        |           /     /  | Header |       |
    +            +--------+ +---------+        +--------+       |
    +            | Entry1 | | Shadow1 | /____            \_____/
    +            +--------+ +---------+ \    \
    +            :        :        _          \
    +            +--------+       |\           +----------+
    +                               \          | Dataset1 |
    +                                \________ +----------+
    +                                         \              \
    +                                          +----------+   |
    +                                          | Dataset2 |   |- New Dataset
    +                                          +----------+   |
    +                                                        /
    +			     FIGURE 2
    +
    +
    +What happens when the link count for Object increases while Dataset
    +has the Object open?
    +
    +                                                     SymNode2
    +                                                    +--------+
    +    SymNode1                       Object           |        |
    +   +--------+             ____\  +--------+ /______ +--------+
    +   |        |            /    /  | header | \      `| Entry2 |
    +   +--------+ +---------+        +--------+         +--------+
    +   | Entry1 | | Shadow1 | /____                     :        :
    +   +--------+ +---------+ \    \                    +--------+
    +   :        :                   \
    +   +--------+                    +----------+   \________________/
    +                                 | Dataset1 |            |
    +                                 +----------+         New Link
    +
    +			     FIGURE 3
    +
    +  The current state is represented by the left part of Figure 3.  To
    +  create a new link the Object Header had to be located by traversing
    +  through Entry1/Shadow1.  On the way through, the Entry1/Shadow1 
    +  cache is invalidated and the Object Header link count is
    +  incremented. Entry2 is then added to SymNode2.
    +
    +  Since the Object Header link count is greater than one, Object
    +  header data will not be cached in Entry1/Shadow1.
    +
    +  If the initial state had been all of Figure 3 and a third link is
    +  being added and Object is open by Entry1 and Entry2, then creation
    +  of the third link will invalidate the cache in Entry1 or Entry2.  It
    +  doesn't matter which since both caches are already invalidated
    +  anyway.
    +
    +What happens if another Dataset opens the same object by another name?
    +
    +  If the current state is represented by Figure 3, then a Shadow2 is
    +  created and associated with Entry2.  However, since the Object
    +  Header link count is more than one, nothing gets cached in Shadow2
    +  (or Shadow1).
    +
    +What happens if the link count decreases?
    +
    +  If the current state is represented by all of Figure 3 then it isn't
    +  possible to delete Entry1 because the object is currently open
    +  through that entry.  Therefore, the link count must have
    +  decreased because Entry2 was removed.
    +
    +  As Dataset1 reads/writes messages in the Object header they will
    +  begin to be cached in Shadow1 again because the Object header link
    +  count is one.
    +
    +What happens if the object is removed while it's open?
    +
    +  That operation is not allowed.
    +
    +What happens if the directory containing the object is deleted?
    +
    +  That operation is not allowed since deleting the directory requires
    +  that the directory be empty.  The directory cannot be emptied
    +  because the open object cannot be removed from the directory.
    +
    +What happens if the object is moved?
    +
    +  Moving an object is a process consisting of creating a new
    +  hard-link with the new name and then deleting the old name.
    +  This will fail if the object is open.
    +
    +What happens if the directory containing the entry is moved?
    +
    +  The entry and the shadow still exist and are associated with one
    +  another.
    +
    +What if a file is flushed or closed when objects are open?
    +
    +  Flushing a symbol table with open objects writes correct information
    +  to the file since Shadow is copied to Entry before the table is
    +  flushed.
    +
    +  Closing a file with open objects will create a valid file but will
    +  return failure.
    +
    +How is the Shadow associated with the Entry?
    +
    +  A symbol table is composed of one or more symbol nodes, each a
    +  small 1-d array of symbol table entries.  Entries can move within
    +  a node and from node to node as they are added to or removed from
    +  the symbol table, and nodes themselves can move within a symbol
    +  table, being created and destroyed as necessary.
    +
    +  Since a symbol table has an object header with a unique and
    +  constant file offset, and since H5G contains code to efficiently
    +  locate a symbol table entry given its name, we use these two
    +  values as a key within a shadow to associate the shadow with the
    +  symbol table entry.
    +
    +      struct H5G_shadow_t {
    +          haddr_t       stab_addr;    /* symbol table header address */
    +          char         *name;         /* entry name wrt symbol table */
    +          hbool_t       dirty;        /* out-of-date wrt stab entry? */
    +          H5G_entry_t   ent;          /* my copy of stab entry       */
    +          H5G_entry_t  *main;         /* the level 1 entry or null   */
    +          H5G_shadow_t *next, *prev;  /* other shadows for this stab */
    +      };
    +
    +  The set of shadows will be organized in a hash table of linked
    +  lists.  Each linked list will contain the shadows associated with a
    +  particular symbol table header address and the list will be sorted
    +  lexicographically.
    +
    +  Also, each Entry will have a pointer to the corresponding Shadow or
    +  null if there is no shadow.
    +
    +  When a symbol table node is loaded into the main cache, we look up
    +  the linked list of shadows in the shadow hash table based on the
    +  address of the symbol table object header.  We then traverse that
    +  list matching shadows with symbol table entries.
    +
    +  We assume that opening/closing objects will be a relatively
    +  infrequent event compared with loading/flushing symbol table
    +  nodes. Therefore, if we keep the linked list of shadows sorted it
    +  costs O(N) to open and close objects where N is the number of open
    +  objects in that symbol table (instead of O(1)) but it costs only
    +  O(N) to load a symbol table node (instead of O(N^2)).
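    +
    +  As a concrete illustration, here is a minimal sketch of the
    +  lookup just described.  The table size, the hash function, and
    +  the helper names (H5G_NHASH, H5G_shadow_hashfn, H5G_shadow_find,
    +  H5G_shadow_insert) are assumptions made for this sketch only,
    +  not the library's actual code, and the shadow struct is reduced
    +  to the fields the lookup needs:
    +
    +      #include <string.h>
    +
    +      #define H5G_NHASH 64                /* assumed table size */
    +
    +      typedef unsigned long haddr_t;      /* simplified for the sketch */
    +
    +      typedef struct H5G_shadow_t {
    +          haddr_t              stab_addr; /* symbol table header addr  */
    +          char                *name;      /* entry name wrt stab       */
    +          struct H5G_shadow_t *next;      /* bucket chain, kept sorted */
    +      } H5G_shadow_t;
    +
    +      static H5G_shadow_t *H5G_shadow_hash[H5G_NHASH];
    +
    +      /* Hash on the symbol table object header address. */
    +      static unsigned
    +      H5G_shadow_hashfn(haddr_t stab_addr)
    +      {
    +          return (unsigned)(stab_addr % H5G_NHASH);
    +      }
    +
    +      /* Link SHADOW into its bucket, keeping the chain sorted
    +       * lexicographically by name so a whole symbol table node can
    +       * be matched against its shadows in a single O(N) pass. */
    +      static void
    +      H5G_shadow_insert(H5G_shadow_t *shadow)
    +      {
    +          H5G_shadow_t **pp =
    +              &H5G_shadow_hash[H5G_shadow_hashfn(shadow->stab_addr)];
    +
    +          while (*pp && strcmp((*pp)->name, shadow->name) < 0)
    +              pp = &(*pp)->next;
    +          shadow->next = *pp;
    +          *pp = shadow;
    +      }
    +
    +      /* Find the shadow for NAME in the symbol table whose object
    +       * header is at STAB_ADDR, or return null.  Cost is O(N) in
    +       * the number of open objects hashing to this bucket, as
    +       * noted above. */
    +      static H5G_shadow_t *
    +      H5G_shadow_find(haddr_t stab_addr, const char *name)
    +      {
    +          H5G_shadow_t *s;
    +
    +          for (s = H5G_shadow_hash[H5G_shadow_hashfn(stab_addr)];
    +               s; s = s->next) {
    +              if (s->stab_addr == stab_addr && !strcmp(s->name, name))
    +                  return s;
    +          }
    +          return NULL;
    +      }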
    +
    +What about the root symbol entry?
    +
    +  Level 1 storage for the root symbol entry is always available since
    +  it's stored in the hdf5_file_t struct instead of a symbol table
    +  node.  However, the contents of that entry can move from the file
    +  handle to a symbol table node by H5G_mkroot().  Therefore, if the
    +  root object is opened, we keep a shadow entry for it whose
    +  `stab_addr' field is zero and whose `name' is null.
    +
    +  For this reason, the root object should always be read through the
    +  H5G interface.
    +
    +One more key invariant:  The H5O_STAB message in a symbol table header
    +never changes.  This allows symbol table entries to cache the H5O_STAB
    +message for the symbol table to which they point without worrying about
    +whether the cache will ever be invalidated.
    +
    +
    +===========================================
    +Last Modified:  8 July 1998 (technical content)
    +Last Modified:  28 April 2000 (included in HDF5 Technical Notes)
    +HDF Help Desk:  hdfhelp@ncsa.uiuc.edu
    +
    +
    + + + diff --git a/doc/html/TechNotes/Version.html b/doc/html/TechNotes/Version.html new file mode 100644 index 0000000..0e0853b --- /dev/null +++ b/doc/html/TechNotes/Version.html @@ -0,0 +1,137 @@ + + + + Version Numbers + + + +

    HDF5 Release Version Numbers


    1. Introduction


    The HDF5 version number is a set of three integer values
    written as either hdf5-1.2.3 or hdf5 version 1.2
    release 3.

    The 5 is part of the library name and will change only if the
    entire file format and library are redesigned, a change
    similar in scope to the move from HDF4 to HDF5.

    The 1 is the major version number and changes when there is
    an extensive change to the file format or library API.  Such
    a change will likely require files to be translated and
    applications to be modified.  This number is not expected to
    change frequently.

    The 2 is the minor version number and is incremented by each
    public release that presents new features.  Even numbers are
    reserved for stable public versions of the library while odd
    numbers are reserved for development versions.  See the
    diagram below for examples.

    The 3 is the release number.  For public versions of the
    library, the release number is incremented each time a bug is
    fixed and the fix is made available to the public.  For
    development versions, the release number is incremented more
    often (perhaps almost daily).

    2. Abbreviated Versions


    It's often convenient to drop the release number when
    referring to a version of the library, like saying version
    1.2 of HDF5.  The release number can be any value in this
    case.

    3. Special Versions


    Version 1.0.0 was released for alpha testing the first week
    of March, 1998.  The development version number was
    incremented to 1.0.1 and remained constant until the last
    week of April, when the release number started to increase
    and development versions were made available to people
    outside the core HDF5 development team.

    Version 1.0.23 was released mid-July as a second alpha
    version.

    Version 1.1.0 will be the first official beta release, but
    the 1.1 branch will also serve as a development branch since
    we're not concerned about providing bug fixes separately from
    normal development for the beta version.

    After the beta release we rolled back the version number so
    that the first release is version 1.0 and development
    continues on version 1.1.  We felt that an initial version of
    1.0 was more important than continuing to increment the
    pre-release version numbers.

    4. Public versus Development


    The motivation for separate public and development versions
    is that the public version will receive only bug fixes while
    the development version will receive new features.  This also
    allows us to release bug fixes expediently without waiting
    for the development version to reach a stable state.

    Eventually, the development version will near completion and
    a new development branch will fork while the original one
    enters a feature-freeze state.  When the original development
    branch is ready for release, the minor version number will be
    incremented to an even value.

    [Figure: version.gif]
    Fig 1: Version Example

    5. Version Support from the Library


    The library provides a set of macros and functions to query
    and check version numbers.

    H5_VERS_MAJOR
    H5_VERS_MINOR
    H5_VERS_RELEASE
        These preprocessor constants are defined in the public
        include file and determine the version of the include
        files.

    herr_t H5get_libversion (unsigned *majnum, unsigned *minnum,
    unsigned *relnum)
        This function returns through its arguments the version
        numbers for the library to which the application is
        linked.

    void H5check(void)
        This is a macro that verifies that the version number of
        the HDF5 include file used to compile the application
        matches the version number of the library to which the
        application is linked.  This check occurs automatically
        when the first HDF5 file is created or opened and is
        important because a mismatch between the include files
        and the library is likely to result in corrupted data
        and/or segmentation faults.  If a mismatch is detected,
        the library issues an error message on the standard error
        stream and aborts with a core dump.

    herr_t H5check_version (unsigned majnum, unsigned minnum,
    unsigned relnum)
        This function is called by the H5check() macro with the
        include file version constants.  The function compares
        its arguments to the result returned by
        H5get_libversion() and, if a mismatch is detected, prints
        an error message on the standard error stream and aborts.
    HDF Help Desk
    + + + +Last modified: Fri Oct 30 10:32:50 EST 1998 + + + + diff --git a/doc/html/TechNotes/pipe1.gif b/doc/html/TechNotes/pipe1.gif new file mode 100644 index 0000000..3b489a6 Binary files /dev/null and b/doc/html/TechNotes/pipe1.gif differ diff --git a/doc/html/TechNotes/pipe1.obj b/doc/html/TechNotes/pipe1.obj new file mode 100644 index 0000000..41f3461 --- /dev/null +++ b/doc/html/TechNotes/pipe1.obj @@ -0,0 +1,136 @@ +%TGIF 3.0-p5 +state(1,33,100,0,0,0,8,1,9,1,1,0,0,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). +% +% @(#)$Header$ +% %W% +% +unit("1 pixel/pixel"). +page(1,"",1). +box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ +]). +box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',5,[ + 160,160,144,224,160,272,176,224,160,160],1,2,1,25,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 848,160,832,224,848,272,864,224,848,160],1,2,1,34,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +box('black',464,192,496,256,26,1,1,39,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 160,224,464,224],1,2,1,40,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',2,[ + 496,224,848,224],1,2,1,41,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',5,[ + 192,224,176,288,192,336,208,288,192,224],1,2,1,42,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 432,224,416,288,432,336,448,288,432,224],1,2,1,43,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',2,[ + 192,288,432,288],1,2,1,44,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +box('black',464,352,496,416,26,1,1,45,0,0,0,0,0,'1',[ +]). +poly('black',5,[ + 528,224,512,288,528,336,544,288,528,224],1,2,1,46,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 816,224,800,288,816,336,832,288,816,224],1,2,1,47,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',2,[ + 528,288,816,288],1,2,1,48,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',5,[ + 464,256,456,304,464,328,488,304,488,256],1,2,1,62,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',2,[ + 480,352,488,304],2,2,1,85,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ +]). +box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ +]). +text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "File"]). +text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Application"]). +text('black',480,144,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5D_read()"]). +text('black',480,128,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5Dread()"]). +text('black',304,208,'Helvetica',0,17,1,1,0,1,86,15,115,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_arr_read()"]). +text('black',304,192,'Helvetica',0,17,1,1,0,1,99,15,119,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5S_simp_fgath()"]). +text('black',296,288,'Helvetica',0,17,1,1,0,1,101,15,125,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_block_read()"]). +text('black',296,304,'Helvetica',0,17,1,1,0,1,90,15,132,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_low_read()"]). +text('black',296,320,'Helvetica',0,17,1,1,0,1,98,15,136,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_sec2_read()"]). +text('black',296,336,'Helvetica',0,17,1,1,0,1,33,15,140,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "read()"]). 
+text('black',664,208,'Helvetica',0,17,1,1,0,1,106,15,146,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_stride_copy()"]). +text('black',664,176,'Helvetica',0,17,1,1,0,1,104,15,150,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5S_simp_mscat()"]). +text('black',664,272,'Helvetica',0,17,1,1,0,1,54,15,154,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "memcpy()"]). +text('black',384,392,'Helvetica',0,17,1,1,0,1,105,15,170,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5T_conv_struct()"]). +poly('black',4,[ + 392,384,400,352,440,368,456,336],1,1,1,172,1,0,0,0,8,3,0,0,0,'1','8','3', + "6",[ +]). +text('black',480,176,'Helvetica',0,17,1,1,0,1,44,15,176,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "TCONV"]). +text('black',480,416,'Helvetica',0,17,1,1,0,1,25,15,182,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "BKG"]). +box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ +]). +poly('black',5,[ + 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "== Loop"]). +poly('black',3,[ + 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',480,40,'Helvetica',0,24,1,1,0,1,380,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Fig 1: Internal Contiguous Storage"]). +text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "A"]). +text('black',160,208,'Helvetica',0,17,1,1,0,1,8,15,207,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "B"]). +text('black',192,272,'Helvetica',0,17,1,1,0,1,9,15,211,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "C"]). +text('black',504,208,'Helvetica',0,17,1,1,0,1,8,15,215,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "E"]). +text('black',528,272,'Helvetica',0,17,1,1,0,1,8,15,223,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "F"]). +text('black',464,304,'Helvetica',0,17,1,1,0,1,9,15,231,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "D"]). +text('black',664,192,'Helvetica',0,17,1,1,0,1,107,15,324,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_hyper_copy()"]). diff --git a/doc/html/TechNotes/pipe2.gif b/doc/html/TechNotes/pipe2.gif new file mode 100644 index 0000000..3a0c947 Binary files /dev/null and b/doc/html/TechNotes/pipe2.gif differ diff --git a/doc/html/TechNotes/pipe2.obj b/doc/html/TechNotes/pipe2.obj new file mode 100644 index 0000000..70d9c18 --- /dev/null +++ b/doc/html/TechNotes/pipe2.obj @@ -0,0 +1,168 @@ +%TGIF 3.0-p5 +state(1,33,100,0,0,0,8,1,9,1,1,1,1,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). +% +% @(#)$Header$ +% %W% +% +unit("1 pixel/pixel"). +page(1,"",1). +box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ +]). +box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',5,[ + 160,160,144,224,160,272,176,224,160,160],1,2,1,25,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 848,160,832,224,848,272,864,224,848,160],1,2,1,34,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +box('black',464,192,496,256,26,1,1,39,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 160,224,464,224],1,2,1,40,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',2,[ + 496,224,848,224],1,2,1,41,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',5,[ + 192,224,176,288,192,336,208,288,192,224],1,2,1,42,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 432,224,416,288,432,336,448,288,432,224],1,2,1,43,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). 
+poly('black',2,[ + 192,288,432,288],1,2,1,44,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +box('black',464,352,496,416,26,1,1,45,0,0,0,0,0,'1',[ +]). +poly('black',5,[ + 528,224,512,288,528,336,544,288,528,224],1,2,1,46,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 816,224,800,288,816,336,832,288,816,224],1,2,1,47,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',2,[ + 528,288,816,288],1,2,1,48,0,26,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',5,[ + 848,240,848,352,832,384,800,384,496,384],1,2,1,55,1,0,0,0,10,4,0,0,0,'2','10','4', + "70",[ +]). +poly('black',5,[ + 528,384,512,448,528,496,544,448,528,384],1,2,1,57,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 800,384,784,448,800,496,816,448,800,384],1,2,1,58,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',2,[ + 800,448,528,448],1,2,1,61,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',5,[ + 464,256,456,304,464,328,488,304,488,256],1,2,1,62,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',2,[ + 480,352,488,304],0,2,1,85,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ +]). +box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ +]). +text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "File"]). +text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Application"]). +text('black',480,144,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5D_read()"]). +text('black',480,128,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5Dread()"]). +text('black',304,208,'Helvetica',0,17,1,1,0,1,86,15,115,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_arr_read()"]). +text('black',304,192,'Helvetica',0,17,1,1,0,1,99,15,119,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5S_simp_fgath()"]). +text('black',296,288,'Helvetica',0,17,1,1,0,1,101,15,125,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_block_read()"]). +text('black',296,304,'Helvetica',0,17,1,1,0,1,90,15,132,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_low_read()"]). +text('black',296,320,'Helvetica',0,17,1,1,0,1,98,15,136,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_sec2_read()"]). +text('black',296,336,'Helvetica',0,17,1,1,0,1,33,15,140,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "read()"]). +text('black',664,208,'Helvetica',0,17,1,1,0,1,106,15,146,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_stride_copy()"]). +text('black',664,176,'Helvetica',0,17,1,1,0,1,104,15,150,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5S_simp_mscat()"]). +text('black',664,272,'Helvetica',0,17,1,1,0,1,54,15,154,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "memcpy()"]). +text('black',672,368,'Helvetica',0,17,1,1,0,1,106,15,158,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_stride_copy()"]). +text('black',672,336,'Helvetica',0,17,1,1,0,1,105,15,162,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5S_simp_mgath()"]). +text('black',672,432,'Helvetica',0,17,1,1,0,1,54,15,166,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "memcpy()"]). +text('black',384,392,'Helvetica',0,17,1,1,0,1,105,15,170,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5T_conv_struct()"]). +poly('black',4,[ + 392,384,400,352,440,368,456,336],1,1,1,172,1,0,0,0,8,3,0,0,0,'1','8','3', + "6",[ +]). +text('black',480,176,'Helvetica',0,17,1,1,0,1,44,15,176,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "TCONV"]). +text('black',480,416,'Helvetica',0,17,1,1,0,1,25,15,182,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "BKG"]). 
+box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ +]). +poly('black',5,[ + 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "== Loop"]). +poly('black',3,[ + 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',480,40,'Helvetica',0,24,1,1,0,1,404,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Fig 2: Partially Initialized Destination"]). +text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "A"]). +text('black',160,208,'Helvetica',0,17,1,1,0,1,8,15,207,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "B"]). +text('black',192,272,'Helvetica',0,17,1,1,0,1,9,15,211,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "C"]). +text('black',504,208,'Helvetica',0,17,1,1,0,1,8,15,215,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "E"]). +text('black',528,272,'Helvetica',0,17,1,1,0,1,8,15,223,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "F"]). +text('black',856,288,'Helvetica',0,17,1,1,0,1,9,15,225,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "G"]). +text('black',800,432,'Helvetica',0,17,1,1,0,1,9,15,229,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H"]). +text('black',464,304,'Helvetica',0,17,1,1,0,1,9,15,231,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "D"]). +poly('black',4,[ + 848,240,848,224,864,224,904,224],0,2,1,318,1,0,0,0,10,4,0,0,0,'2','10','4', + "6",[ +]). +text('black',664,192,'Helvetica',0,17,1,1,0,1,107,15,326,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_hyper_copy()"]). +text('black',672,352,'Helvetica',0,17,1,1,0,1,107,15,334,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_hyper_copy()"]). diff --git a/doc/html/TechNotes/pipe3.gif b/doc/html/TechNotes/pipe3.gif new file mode 100644 index 0000000..26d82ad Binary files /dev/null and b/doc/html/TechNotes/pipe3.gif differ diff --git a/doc/html/TechNotes/pipe3.obj b/doc/html/TechNotes/pipe3.obj new file mode 100644 index 0000000..cdfef7c --- /dev/null +++ b/doc/html/TechNotes/pipe3.obj @@ -0,0 +1,70 @@ +%TGIF 3.0-p5 +state(1,33,100,0,0,0,8,1,9,1,1,0,0,0,0,1,1,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). +% +% @(#)$Header$ +% %W% +% +unit("1 pixel/pixel"). +page(1,"",1). +box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ +]). +box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +box('black',912,64,976,256,0,1,1,87,0,0,0,0,0,'1',[ +]). +box('black',928,96,960,224,26,1,1,88,0,0,0,0,0,'1',[ +]). +text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "File"]). +text('black',944,48,'Helvetica',0,17,1,1,0,1,64,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Application"]). +text('black',480,104,'Helvetica',0,17,1,1,0,1,65,15,99,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5D_read()"]). +text('black',480,88,'Helvetica',0,17,1,1,0,1,58,15,108,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5Dread()"]). +box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ +]). +poly('black',5,[ + 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "== Loop"]). +poly('black',3,[ + 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',480,40,'Helvetica',0,24,1,1,0,1,295,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Fig 3: No Type Conversion"]). 
+text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "A"]). +poly('black',5,[ + 152,160,136,224,152,272,168,224,152,160],1,2,1,273,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +text('black',480,120,'Helvetica',0,17,1,1,0,1,96,15,277,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5S_simp_read()"]). +text('black',480,136,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_arr_read()"]). +poly('black',5,[ + 880,160,864,224,880,272,896,224,880,160],1,2,1,283,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',2,[ + 152,224,880,224],1,2,1,286,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +text('black',480,232,'Helvetica',0,17,1,1,0,1,101,15,291,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_block_read()"]). +text('black',480,248,'Helvetica',0,17,1,1,0,1,90,15,293,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_low_read()"]). +text('black',480,264,'Helvetica',0,17,1,1,0,1,98,15,309,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_sec2_read()"]). +text('black',480,280,'Helvetica',0,17,1,1,0,1,33,15,311,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "read()"]). +text('black',176,208,'Helvetica',0,17,1,1,0,1,8,15,418,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "B"]). diff --git a/doc/html/TechNotes/pipe4.gif b/doc/html/TechNotes/pipe4.gif new file mode 100644 index 0000000..a3a857b Binary files /dev/null and b/doc/html/TechNotes/pipe4.gif differ diff --git a/doc/html/TechNotes/pipe4.obj b/doc/html/TechNotes/pipe4.obj new file mode 100644 index 0000000..6f50123 --- /dev/null +++ b/doc/html/TechNotes/pipe4.obj @@ -0,0 +1,92 @@ +%TGIF 3.0-p5 +state(1,33,100,0,0,0,8,1,9,1,1,1,2,1,0,1,0,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). +% +% @(#)$Header$ +% %W% +% +unit("1 pixel/pixel"). +page(1,"",1). +box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ +]). +box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +box('black',912,96,944,224,26,1,1,88,0,0,0,0,0,'1',[ +]). +text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "File"]). +text('black',928,72,'Helvetica',0,17,1,1,0,1,32,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Buffer"]). +box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ +]). +poly('black',5,[ + 72,392,56,456,72,504,88,456,72,392],1,2,1,188,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +text('black',96,448,'Helvetica',0,17,1,0,0,1,46,15,189,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "== Loop"]). +poly('black',3,[ + 48,384,152,384,152,512],0,1,1,191,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',480,40,'Helvetica',0,24,1,1,0,1,372,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Fig 4: Regularly Chunked Storage"]). +text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "A"]). +text('black',480,104,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_arr_read()"]). +text('black',480,120,'Helvetica',0,17,1,1,0,1,102,15,349,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_istore_read()"]). +text('black',480,136,'Helvetica',0,17,1,1,0,1,167,15,351,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_istore_copy_hyperslab()"]). +poly('black',5,[ + 160,160,144,224,160,272,176,224,160,160],1,2,1,362,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +poly('black',5,[ + 880,160,864,224,880,272,896,224,880,160],1,2,1,363,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +box('black',448,192,512,256,26,1,1,364,0,0,0,0,0,'1',[ +]). 
+text('black',480,176,'Helvetica',0,17,1,1,0,1,43,15,367,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "CHUNK"]). +poly('black',2,[ + 160,224,448,224],1,2,1,372,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +poly('black',2,[ + 512,224,880,224],1,2,1,373,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +text('black',288,224,'Helvetica',0,17,1,1,0,1,101,15,385,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_block_read()"]). +text('black',288,240,'Helvetica',0,17,1,1,0,1,90,15,387,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_low_read()"]). +text('black',288,256,'Helvetica',0,17,1,1,0,1,98,15,391,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_sec2_read()"]). +text('black',288,272,'Helvetica',0,17,1,1,0,1,33,15,395,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "read()"]). +poly('black',5,[ + 456,256,448,296,480,320,512,296,504,256],1,2,1,401,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +text('black',184,208,'Helvetica',0,17,1,1,0,1,8,15,422,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "B"]). +text('black',520,208,'Helvetica',0,17,1,1,0,1,9,15,434,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "D"]). +text('black',440,272,'Helvetica',0,17,1,1,0,1,9,15,440,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "C"]). +text('black',480,320,'Helvetica',0,17,1,1,0,1,107,15,444,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5Z_uncompress()"]). +text('black',672,224,'Helvetica',0,17,1,1,0,1,107,15,454,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_hyper_copy()"]). +text('black',672,240,'Helvetica',0,17,1,1,0,1,106,15,464,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5V_stride_copy()"]). +text('black',672,256,'Helvetica',0,17,1,1,0,1,54,15,466,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "memcpy()"]). +text('black',168,488,'Helvetica',0,17,1,0,0,1,282,15,471,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "NOTE: H5Z_uncompress() is not implemented yet."]). diff --git a/doc/html/TechNotes/pipe5.gif b/doc/html/TechNotes/pipe5.gif new file mode 100644 index 0000000..6ae0098 Binary files /dev/null and b/doc/html/TechNotes/pipe5.gif differ diff --git a/doc/html/TechNotes/pipe5.obj b/doc/html/TechNotes/pipe5.obj new file mode 100644 index 0000000..4738bbd --- /dev/null +++ b/doc/html/TechNotes/pipe5.obj @@ -0,0 +1,52 @@ +%TGIF 3.0-p5 +state(1,33,100,0,0,0,8,1,9,1,1,1,2,1,0,1,0,'Helvetica',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1408,1088,0,0,2880). +% +% @(#)$Header$ +% %W% +% +unit("1 pixel/pixel"). +page(1,"",1). +box('black',64,64,128,256,0,1,1,22,0,0,0,0,0,'1',[ +]). +box('black',80,96,112,224,26,1,1,23,0,0,0,0,0,'1',[ +]). +poly('black',2,[ + 128,160,912,160],1,2,1,24,0,0,0,0,10,4,0,0,0,'2','10','4', + "0",[ +]). +box('black',912,96,944,224,26,1,1,88,0,0,0,0,0,'1',[ +]). +text('black',96,48,'Helvetica',0,17,1,1,0,1,21,15,89,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "File"]). +text('black',928,72,'Helvetica',0,17,1,1,0,1,32,15,93,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Buffer"]). +box('black',48,32,992,512,0,1,1,186,0,0,0,0,0,'1',[ +]). +text('black',480,40,'Helvetica',0,24,1,1,0,1,333,29,197,0,24,5,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Fig 5: Reading a Single Chunk"]). +text('black',136,144,'Helvetica',0,17,1,1,0,1,9,15,201,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "A"]). +text('black',480,112,'Helvetica',0,17,1,1,0,1,86,15,281,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_arr_read()"]). +text('black',480,128,'Helvetica',0,17,1,1,0,1,102,15,349,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_istore_read()"]). +text('black',480,144,'Helvetica',0,17,1,1,0,1,167,15,351,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_istore_copy_hyperslab()"]). 
+text('black',480,160,'Helvetica',0,17,1,1,0,1,101,15,385,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_block_read()"]). +text('black',480,176,'Helvetica',0,17,1,1,0,1,90,15,387,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_low_read()"]). +text('black',480,192,'Helvetica',0,17,1,1,0,1,98,15,391,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5F_sec2_read()"]). +text('black',480,208,'Helvetica',0,17,1,1,0,1,33,15,395,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "read()"]). +text('black',864,240,'Helvetica',0,17,1,1,0,1,107,15,444,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "H5Z_uncompress()"]). +text('black',56,488,'Helvetica',0,17,1,0,0,1,282,15,471,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "NOTE: H5Z_uncompress() is not implemented yet."]). +poly('black',5,[ + 912,176,864,176,840,208,872,232,912,216],1,2,1,490,2,0,0,0,10,4,0,0,0,'2','10','4', + "",[ +]). +text('black',896,184,'Helvetica',0,17,1,0,0,1,8,15,491,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "B"]). diff --git a/doc/html/TechNotes/version.gif b/doc/html/TechNotes/version.gif new file mode 100644 index 0000000..41d4401 Binary files /dev/null and b/doc/html/TechNotes/version.gif differ diff --git a/doc/html/TechNotes/version.obj b/doc/html/TechNotes/version.obj new file mode 100644 index 0000000..96b5b7f --- /dev/null +++ b/doc/html/TechNotes/version.obj @@ -0,0 +1,96 @@ +%TGIF 3.0-p5 +state(0,33,100,0,0,0,8,1,9,1,1,0,2,1,0,1,0,'Courier',0,17,0,0,0,10,0,0,1,1,0,16,0,0,1,1,1,0,1088,1408,0,0,2880). +% +% @(#)$Header$ +% %W% +% +unit("1 pixel/pixel"). +page(1,"",1). +poly('black',2,[ + 128,128,128,448],0,3,1,0,0,0,0,0,12,5,0,0,0,'3','12','5', + "0",[ +]). +poly('black',2,[ + 128,128,128,64],0,3,1,1,0,0,2,0,12,5,0,0,0,'3','12','5', + "0",[ +]). +poly('black',2,[ + 128,448,128,512],0,3,1,4,0,0,2,0,12,5,0,0,0,'3','12','5', + "0",[ +]). +text('black',144,112,'Courier',0,17,1,0,0,1,42,14,22,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.3.30"]). +text('black',144,144,'Courier',0,17,1,0,0,1,42,14,30,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.3.31"]). +text('black',144,176,'Courier',0,17,1,0,0,1,42,14,32,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.3.32"]). +poly('black',2,[ + 256,208,256,448],0,3,1,34,0,0,0,0,12,5,0,0,0,'3','12','5', + "0",[ +]). +poly('black',2,[ + 256,448,256,512],0,3,1,36,0,0,2,0,12,5,0,0,0,'3','12','5', + "0",[ +]). +poly('black',2,[ + 128,192,256,208],1,1,1,37,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',144,224,'Courier',0,17,1,0,0,1,42,14,41,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.3.33"]). +text('black',144,256,'Courier',0,17,1,0,0,1,42,14,43,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.3.34"]). +text('black',272,224,'Courier',0,17,1,0,0,1,35,14,45,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.5.0"]). +text('black',272,256,'Courier',0,17,1,0,0,1,35,14,47,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.5.1"]). +text('black',272,288,'Courier',0,17,1,0,0,1,35,14,49,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.5.2"]). +text('black',272,320,'Courier',0,17,1,0,0,1,35,14,51,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.5.3"]). +text('black',144,288,'Courier',0,17,1,0,0,1,42,14,53,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.3.35"]). +text('black',144,320,'Courier',0,17,1,0,0,1,35,14,57,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.4.0"]). +text('black',144,368,'Courier',0,17,1,0,0,1,35,14,59,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.4.1"]). +text('black',272,192,'Helvetica',0,17,1,0,0,1,144,15,67,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "New development branch"]). 
+text('black',144,64,'Helvetica',0,17,1,0,0,1,163,15,69,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Original development branch"]). +text('black',16,208,'Helvetica',0,17,2,0,0,1,87,30,71,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Feature Freeze", + "at this point."]). +text('black',16,320,'Helvetica',0,17,2,0,0,1,84,30,73,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Public Release", + "at this point."]). +poly('black',2,[ + 104,208,128,208],1,1,1,77,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 104,320,128,320],1,1,1,78,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +poly('black',2,[ + 256,336,128,352],1,1,1,79,0,0,0,0,8,3,0,0,0,'1','8','3', + "0",[ +]). +text('black',320,368,'Helvetica',0,17,3,0,0,1,137,45,82,0,12,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "Merge a bug fix from the", + "development branch to", + "the release branch."]). +box('black',312,368,464,416,0,1,1,87,0,0,0,0,0,'1',[ +]). +poly('black',4,[ + 312,392,240,384,296,344,232,344],1,1,1,90,1,0,0,0,8,3,0,0,0,'1','8','3', + "6",[ +]). +box('black',8,208,104,240,0,1,1,95,0,0,0,0,0,'1',[ +]). +box('black',8,320,104,352,0,1,1,98,0,0,0,0,0,'1',[ +]). +text('black',144,408,'Courier',0,17,1,0,0,1,35,14,102,0,11,3,0,0,0,0,0,2,0,0,0,0,"",0,0,0,[ + "1.4.2"]). +box('black',0,40,480,528,0,1,1,104,0,0,0,0,0,'1',[ +]). -- cgit v0.12