author     Robb Matzke <matzke@llnl.gov>    1998-05-22 15:05:53 (GMT)
committer  Robb Matzke <matzke@llnl.gov>    1998-05-22 15:05:53 (GMT)
commit     d392756a1b6e28ff5a28e80c5c26513e8ef54e69 (patch)
tree       00479cc9a7baba6c7e8e68d375f79221729751d4 /src
parent     57e57ebb14aa3f5a88245965292031f25dfc7756 (diff)
[svn-r400] Changes since 19980513
----------------------
./html/Datasets.html
    Fixed a couple of typos.

./src/H5.c
    Added the `Z' modifier to HDfprintf() for `size_t' sizes.  Use it
    like this: HDfprintf(stderr,"size is %Zd\n", (size_t)x);

./src/H5AC.c
./src/H5F.c
./src/H5Fprivate.h
    The maximum number of meta data objects that can be cached can be
    set from the application (but the library might not honor it every
    time; it's a hint).

./src/H5D.c
    Changed a warning message so it's not so alarming.

./src/H5Fistore.c
    Chunks can be cached.

./src/H5O.c
./src/H5Oprivate.h
    Added H5O_copy() and H5O_free() to copy and free messages.

./src/H5P.c
./src/H5Ppublic.h
    Added H5Pset_cache() and H5Pget_cache() and changed lots of
    "template" to "property list".

./src/H5Z.c
./src/H5Zpublic.h
    Miscellaneous little things to clean up.  Mostly just removed
    H5Z_MAXVAL and added H5Z_USERDEF_MIN and H5Z_USERDEF_MAX.

./MANIFEST
./test/Makefile.in
./test/chunk.c [NEW]
    Added a performance test for chunk caching.  It looks at the amount
    of I/O instead of timing because timing is partly dependent on the
    chunk size and I wanted a measurement that was a function of only
    the cache size.  Run `chunk' with no arguments and then say
    `gnuplot x-gnuplot' to see the plots (press return between plots).
    Postscript files are created for each plot.

./test/big.c
./test/cmpd_dset.c
./test/extend.c
./test/external.c
./test/gheap.c
    Added H5F_ACC_DEBUG so we can see cache performance statistics.
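A minimal usage sketch of the new cache controls named in this log (not part of the commit). H5Pset_cache()'s exact argument list is defined by the ./src/H5P.c and ./src/H5Ppublic.h changes; the three-value form assumed below mirrors the mdc_nelmts, rdcc_nbytes, and rdcc_w0 fields this commit adds to the file access property list in ./src/H5F.c, and the file name is illustrative:

    hid_t fapl, file;
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    /* meta data cache slots, raw data (chunk) cache bytes, preemption weight w0 */
    H5Pset_cache(fapl, 200, 512*1024, 0.75);
    file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    /* ... create and access chunked datasets; chunks are now cached ... */
    H5Fclose(file);
    H5Pclose(fapl);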
Diffstat (limited to 'src')
-rw-r--r--src/H5.c12
-rw-r--r--src/H5AC.c19
-rw-r--r--src/H5D.c8
-rw-r--r--src/H5Distore.c934
-rw-r--r--src/H5F.c83
-rw-r--r--src/H5Fistore.c934
-rw-r--r--src/H5Fprivate.h20
-rw-r--r--src/H5MM.c130
-rw-r--r--src/H5O.c77
-rw-r--r--src/H5Oprivate.h2
-rw-r--r--src/H5P.c292
-rw-r--r--src/H5Ppublic.h4
-rw-r--r--src/H5Z.c16
-rw-r--r--src/H5Zpublic.h35
-rw-r--r--src/Makefile.in8
15 files changed, 1942 insertions, 632 deletions
diff --git a/src/H5.c b/src/H5.c
index 4e48221..68838b2 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -535,7 +535,7 @@ HDfprintf (FILE *stream, const char *fmt, ...)
}
/* Type modifier */
- if (strchr ("Hhlq", *s)) {
+ if (strchr ("ZHhlq", *s)) {
switch (*s) {
case 'H':
if (sizeof(hsize_t)==sizeof(long)) {
@@ -544,6 +544,16 @@ HDfprintf (FILE *stream, const char *fmt, ...)
strcpy (modifier, PRINTF_LL_WIDTH);
}
break;
+ case 'Z':
+ if (sizeof(size_t)==sizeof(long)) {
+ strcpy (modifier, "l");
+ } else if (sizeof(size_t)==sizeof(long long)) {
+ strcpy (modifier, PRINTF_LL_WIDTH);
+ } else if (sizeof(size_t)==sizeof(int)) {
+ modifier[0] = '\0';
+ }
+ break;
+
default:
modifier[0] = *s;
modifier[1] = '\0';
diff --git a/src/H5AC.c b/src/H5AC.c
index 98e7dfa..986b35b 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -50,7 +50,7 @@ static H5AC_t *current_cache_g = NULL; /*for sorting */
* pass an invalid value then H5AC_NSLOTS is used. You can
* turn off caching by using 1 for the SIZE_HINT value.
*
- * Return: Success: SUCCEED
+ * Return: Success: Number of slots actually used.
*
* Failure: FAIL
*
@@ -62,7 +62,7 @@ static H5AC_t *current_cache_g = NULL; /*for sorting */
*
*-------------------------------------------------------------------------
*/
-herr_t
+intn
H5AC_create(H5F_t *f, intn size_hint)
{
H5AC_t *cache = NULL;
@@ -70,14 +70,13 @@ H5AC_create(H5F_t *f, intn size_hint)
assert(f);
assert(NULL == f->shared->cache);
- if (size_hint < 1)
- size_hint = H5AC_NSLOTS;
+ if (size_hint < 1) size_hint = H5AC_NSLOTS;
f->shared->cache = cache = H5MM_xcalloc(1, sizeof(H5AC_t));
cache->nslots = size_hint;
cache->slot = H5MM_xcalloc((intn)(cache->nslots), sizeof(H5AC_slot_t));
- FUNC_LEAVE(SUCCEED);
+ FUNC_LEAVE(size_hint);
}
/*-------------------------------------------------------------------------
@@ -838,7 +837,7 @@ H5AC_debug(H5F_t *f)
FUNC_ENTER(H5AC_debug, FAIL);
- fprintf(stderr, "H5AC: cache statistics for file %s\n", f->name);
+ fprintf(stderr, "H5AC: meta data cache statistics for file %s\n", f->name);
fprintf(stderr, " %-18s %8s %8s %8s %8s+%-8s\n",
"Layer", "Hits", "Misses", "MissRate", "Inits", "Flushes");
fprintf(stderr, " %-18s %8s %8s %8s %8s-%-8s\n",
@@ -866,10 +865,12 @@ H5AC_debug(H5F_t *f)
sprintf(s, "unknown id %d", i);
}
- if (cache->diagnostics[i].nhits) {
+ if (cache->diagnostics[i].nhits>0 ||
+ cache->diagnostics[i].nmisses>0) {
miss_rate = 100.0 * cache->diagnostics[i].nmisses /
- cache->diagnostics[i].nhits;
- } else {
+ (cache->diagnostics[i].nhits+
+ cache->diagnostics[i].nmisses);
+ } else {
miss_rate = 0.0;
}
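As an aside on the miss-rate fix above: the old code divided misses by hits alone, which overstates the rate and divides by zero when there are no hits. A standalone restatement of the corrected arithmetic (illustrative names, not library code):

    /* misses as a percentage of all lookups; guards the no-lookup case */
    double miss_rate = (nhits + nmisses) > 0
                           ? 100.0 * nmisses / (double)(nhits + nmisses)
                           : 0.0;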
diff --git a/src/H5D.c b/src/H5D.c
index 90ed3d0..f07efd8 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -1252,8 +1252,8 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
mem_space, buf/*out*/);
if (status>=0) goto succeed;
#ifdef H5D_DEBUG
- fprintf (stderr, "H5D: input pipe optimization failed "
- "(falling through)\n");
+ fprintf (stderr, "H5D: data space conversion could not be optimized "
+ "for this case (using general method instead)\n");
#endif
H5E_clear ();
}
@@ -1547,8 +1547,8 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
mem_space, buf);
if (status>=0) goto succeed;
#ifdef H5D_DEBUG
- fprintf (stderr, "H5D: output pipe optimization failed "
- "(falling through)\n");
+ fprintf (stderr, "H5D: data space conversion could not be optimized "
+ "for this case (using general method instead)\n");
#endif
H5E_clear ();
}
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 7e80f1a..91c4e9d 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -2,8 +2,17 @@
* Copyright (C) 1997 NCSA
* All rights reserved.
*
- * Programmer: Robb Matzke <matzke@llnl.gov>
- * Wednesday, October 8, 1997
+ * Programmer: Robb Matzke <matzke@llnl.gov>
+ * Wednesday, October 8, 1997
+ *
+ * Purpose: Indexed (chunked) I/O functions. The logical
+ * multi-dimensional data space is regularly partitioned into
+ * same-sized "chunks", the first of which is aligned with the
+ * logical origin. The chunks are given a multi-dimensional
+ * index which is used as a lookup key in a B-tree that maps
+ * chunk index to disk address. Each chunk can be compressed
+ * independently and the chunks may move around in the file as
+ * their storage requirements change.
*/
#include <H5private.h>
#include <H5Dprivate.h>
@@ -14,13 +23,25 @@
#include <H5Oprivate.h>
#include <H5Vprivate.h>
-
/* Interface initialization */
#define PABLO_MASK H5F_istore_mask
static hbool_t interface_initialize_g = FALSE;
#define INTERFACE_INIT NULL
-/* PRIVATE PROTOTYPES */
+/* Raw data chunks are cached. Each entry in the cache is: */
+typedef struct H5F_rdcc_ent_t {
+ hbool_t locked; /*entry is locked in cache */
+ hbool_t dirty; /*needs to be written to disk? */
+ H5O_layout_t *layout; /*the layout message */
+ H5O_compress_t *comp; /*compression message */
+ hssize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
+ size_t chunk_size; /*size of a chunk */
+ size_t rd_count; /*bytes remaining to be read */
+ size_t wr_count; /*bytes remaining to be written */
+ uint8 *chunk; /*the uncompressed chunk data */
+} H5F_rdcc_ent_t;
+
+/* Private prototypes */
static size_t H5F_istore_sizeof_rkey(H5F_t *f, const void *_udata);
static herr_t H5F_istore_new_node(H5F_t *f, H5B_ins_t, void *_lt_key,
void *_udata, void *_rt_key, haddr_t *);
@@ -546,7 +567,6 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
* Already exists. If the new size is not the same as the old size
* then we should reallocate storage.
*/
-#if 1
if (lt_key->nbytes != udata->key.nbytes) {
if (H5MF_realloc (f, H5MF_RAW, lt_key->nbytes, addr,
udata->key.nbytes, new_node/*out*/)<0) {
@@ -561,13 +581,6 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
udata->addr = *addr;
ret_value = H5B_INS_NOOP;
}
-#else
- assert (lt_key->nbytes == udata->key.nbytes);
- assert (!H5F_addr_defined (&(udata->addr)) ||
- H5F_addr_eq (&(udata->addr), addr));
- udata->addr = *addr;
- ret_value = H5B_INS_NOOP;
-#endif
} else if (H5V_hyper_disjointp(udata->mesg.ndims,
lt_key->offset, udata->mesg.dim,
@@ -606,6 +619,596 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
/*-------------------------------------------------------------------------
+ * Function: H5F_istore_init
+ *
+ * Purpose: Initialize the raw data chunk cache for a file. This is
+ * called when the file handle is initialized.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Monday, May 18, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_init (H5F_t *f)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+
+ FUNC_ENTER (H5F_istore_init, FAIL);
+
+ HDmemset (rdcc, 0, sizeof(H5F_rdcc_t));
+ if (f->shared->access_parms->rdcc_nbytes>0) {
+ rdcc->nslots = 25; /*some initial number of slots*/
+ rdcc->slot = H5MM_xcalloc (rdcc->nslots, sizeof(H5F_rdcc_ent_t));
+ }
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_flush_entry
+ *
+ * Purpose: Writes a chunk to disk.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_flush_entry (H5F_t *f, H5F_rdcc_ent_t *ent)
+{
+ void *c_buf = NULL; /*temp compression buffer */
+ void *out_ptr = NULL; /*ptr to output buffer */
+ size_t nbytes; /*size of output buffer */
+ herr_t ret_value = FAIL; /*return value */
+ H5F_istore_ud1_t udata; /*pass through B-tree */
+ intn i;
+
+ FUNC_ENTER (H5F_istore_flush_entry, FAIL);
+ assert (ent);
+ assert (!ent->locked);
+ if (!ent->dirty) HRETURN (SUCCEED);
+
+ /* Should the chunk be compressed before writing it to disk? */
+ if (ent->comp && H5Z_NONE!=ent->comp->method) {
+ c_buf = H5MM_xmalloc (ent->chunk_size);
+ nbytes = H5Z_compress (ent->comp, ent->chunk_size, ent->chunk, c_buf);
+ if (nbytes && nbytes<ent->chunk_size) {
+ out_ptr = c_buf;
+ } else {
+ out_ptr = ent->chunk;
+ nbytes = ent->chunk_size;
+ }
+ } else {
+ out_ptr = ent->chunk;
+ nbytes = ent->chunk_size;
+ }
+
+ /*
+     * Create the chunk if it doesn't exist, or reallocate the chunk if its
+ * size changed. Then write the data into the file.
+ */
+ udata.mesg = *(ent->layout);
+ H5F_addr_undef(&(udata.addr));
+ udata.key.nbytes = nbytes;
+ for (i=0; i<ent->layout->ndims; i++) {
+ udata.key.offset[i] = ent->offset[i];
+ }
+
+ if (H5B_insert(f, H5B_ISTORE, &(ent->layout->addr), &udata)<0) {
+ HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+ "unable to allocate chunk");
+ }
+ if (H5F_block_write (f, &(udata.addr), nbytes, out_ptr)<0) {
+ HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+ "unable to write raw data to file");
+ }
+
+ /* Mark cache entry as clean */
+ ent->dirty = FALSE;
+ f->shared->rdcc.nflushes++;
+ ret_value = SUCCEED;
+
+ done:
+ H5MM_xfree (c_buf);
+ FUNC_LEAVE (ret_value);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_flush
+ *
+ * Purpose: Writes all dirty chunks to disk but does not remove them from
+ * the cache.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_flush (H5F_t *f)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ intn i, nerrors=0;
+
+ FUNC_ENTER (H5F_istore_flush, FAIL);
+
+ for (i=0; i<rdcc->nused; i++) {
+ if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
+ nerrors++;
+ }
+ }
+ if (nerrors) {
+ HRETURN_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL,
+ "unable to flush one or more raw data chunks");
+ }
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_preempt
+ *
+ * Purpose: Preempts the specified entry from the cache, flushing it to
+ * disk if necessary.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_preempt (H5F_t *f, intn idx)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent = rdcc->slot + idx;
+
+ FUNC_ENTER (H5F_istore_preempt, FAIL);
+ assert (idx>=0 && idx<rdcc->nused);
+ assert (!ent->locked);
+
+ if (ent->dirty) H5F_istore_flush_entry (f, ent);
+ H5O_free (H5O_LAYOUT, ent->layout);
+ H5O_free (H5O_COMPRESS, ent->comp);
+ H5MM_xfree (ent->chunk);
+ rdcc->nused -= 1;
+ rdcc->nbytes -= ent->chunk_size;
+ HDmemmove (rdcc->slot+idx, rdcc->slot+idx+1,
+ (rdcc->nused-idx) * sizeof(H5F_rdcc_ent_t));
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_dest
+ *
+ * Purpose: Destroy the entire chunk cache by flushing dirty entries,
+ * preempting all entries, and freeing the cache itself.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_dest (H5F_t *f)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ intn i, nerrors=0;
+
+ FUNC_ENTER (H5F_istore_dest, FAIL);
+
+ for (i=rdcc->nused-1; i>=0; --i) {
+ if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
+ nerrors++;
+ }
+ if (H5F_istore_preempt (f, i)<0) {
+ nerrors++;
+ }
+ }
+ if (nerrors) {
+ HRETURN_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL,
+ "unable to flush one or more raw data chunks");
+ }
+
+ H5MM_xfree (rdcc->slot);
+ HDmemset (rdcc, 0, sizeof(H5F_rdcc_t));
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_prune
+ *
+ * Purpose: Prune the cache by preempting some things until the cache has
+ * room for something which is SIZE bytes. Only unlocked
+ * entries are considered for preemption.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_prune (H5F_t *f, size_t size)
+{
+ intn i, meth0, meth1, nerrors=0;
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent0, *ent1;
+ double w0 = f->shared->access_parms->rdcc_w0;
+ size_t total = f->shared->access_parms->rdcc_nbytes;
+
+ FUNC_ENTER (H5F_istore_prune, FAIL);
+
+ /*
+ * We have two pointers that slide down the cache beginning at the least
+ * recently used entry. The distance between the pointers represents the
+ * relative weight. A weight of 50% for the first pointer means that the
+ * second pointer is half the cache length behind the first pointer.
+ */
+ meth0 = rdcc->nused;
+ meth1 = rdcc->nused * (1.0+w0);
+ for (i=MAX(meth0, meth1)-1;
+ rdcc->nbytes+size>total && i>=0;
+ --i, --meth0, --meth1) {
+
+ ent0 = rdcc->slot+meth0; /*might be a bad pointer!*/
+ ent1 = rdcc->slot+meth1; /*might be a bad pointer!*/
+
+ if (meth0>=0 && meth0<rdcc->nused && !ent0->locked &&
+ (0==ent0->rd_count || ent0->chunk_size==ent0->rd_count) &&
+ (0==ent0->wr_count || ent0->chunk_size==ent0->wr_count)) {
+ /*
+ * Method 0: Preempt entries that have a zero rd_count. If the
+ * application is accessing a dataset with a set of
+ * non-overlapping partial I/O requests then chunks with a zero
+ * rd_count will probably not be accessed in the near future.
+ */
+ if (H5F_istore_preempt (f, meth0)<0) nerrors++;
+
+ } else if (meth1>=0 && meth1<rdcc->nused && !ent1->locked) {
+ /*
+ * Method 1: Discard the least recently used members from the
+ * cache. This is a catch-all.
+ */
+ if (H5F_istore_preempt (f, meth1)<0) nerrors++;
+ }
+ }
+ if (nerrors) {
+ HRETURN_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL,
+		       "unable to preempt one or more raw data cache entries");
+ }
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_lock
+ *
+ * Purpose: Return a pointer to an uncompressed chunk. The pointer
+ * points directly into the chunk cache and should not be freed
+ * by the caller but will be valid until it is unlocked. The
+ * input value IDX_HINT is used to speed up cache lookups and
+ *		its output value should be given to H5F_istore_unlock().
+ *
+ * If RELAX is non-zero and the chunk isn't in the cache then
+ * don't try to read it from the file, but just allocate an
+ *		uninitialized buffer to hold the result.  This is intended
+ * for output functions that are about to overwrite the entire
+ * chunk.
+ *
+ * Return: Success: Ptr to an uncompressed chunk.
+ *
+ * Failure: NULL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
+ const H5O_compress_t *comp, const hssize_t offset[],
+ hbool_t relax, intn *idx_hint/*in,out*/)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent = NULL;
+ intn i, j, found = -1;
+ H5F_istore_ud1_t udata; /*B-tree pass-through */
+ size_t chunk_size; /*size of a chunk */
+ herr_t status; /*func return status */
+ void *chunk=NULL; /*the uncompressed chunk*/
+ void *temp=NULL; /*temporary chunk buffer*/
+ void *ret_value=NULL; /*return value */
+
+ FUNC_ENTER (H5F_istore_lock, NULL);
+
+ /* First use the hint */
+ if (idx_hint && *idx_hint>=0 && *idx_hint<rdcc->nused) {
+ ent = rdcc->slot + *idx_hint;
+ if (layout->ndims==ent->layout->ndims) {
+ for (i=0, found=*idx_hint; found>=0 && i<ent->layout->ndims; i++) {
+ if (offset[i]!=ent->offset[i]) found = -1;
+ }
+ }
+ }
+
+ /* Then look at all the entries */
+ for (i=0; found<0 && i<rdcc->nused; i++) {
+ ent = rdcc->slot + i;
+ if (layout->ndims==ent->layout->ndims) {
+ for (j=0, found=i; found>=0 && j<ent->layout->ndims; j++) {
+ if (offset[j]!=ent->offset[j]) found = -1;
+ }
+ }
+ }
+
+
+ if (found>=0) {
+ /*
+ * Already in the cache. Count a hit.
+ */
+ rdcc->nhits++;
+
+ } else if (found<0 && relax) {
+ /*
+ * Not in the cache, but we're about to overwrite the whole thing
+ * anyway, so just allocate a buffer for it but don't initialize that
+ * buffer with the file contents. Count this as a hit instead of a
+ * miss because we saved ourselves lots of work.
+ */
+ rdcc->nhits++;
+ for (i=0, chunk_size=1; i<layout->ndims; i++) {
+ chunk_size *= layout->dim[i];
+ }
+ chunk = H5MM_xmalloc (chunk_size);
+
+ } else {
+ /*
+ * Not in the cache. Read it from the file and count this as a miss
+ * if it's in the file or an init if it isn't.
+ */
+ for (i=0, chunk_size=1; i<layout->ndims; i++) {
+ udata.key.offset[i] = offset[i];
+ chunk_size *= layout->dim[i];
+ }
+ udata.mesg = *layout;
+ H5F_addr_undef (&(udata.addr));
+ status = H5B_find (f, H5B_ISTORE, &(layout->addr), &udata);
+ chunk = H5MM_xmalloc (chunk_size);
+ if (status>=0 && H5F_addr_defined (&(udata.addr))) {
+ /*
+ * The chunk exists on disk but might be compressed. Instead of
+ * allocating the exact size for the compressed chunk we allocate
+ * the entire chunk size -- it reduces strain on the malloc()
+ * subsystem.
+ */
+ if (H5F_block_read (f, &(udata.addr), udata.key.nbytes, chunk)<0) {
+ HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
+ "unable to read raw data chunk");
+ }
+ if (udata.key.nbytes<chunk_size) {
+ temp = H5MM_xmalloc (chunk_size);
+ if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
+ chunk, chunk_size, temp)) {
+ HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
+ "unable to uncompress raw data chunk");
+ }
+ H5MM_xfree (chunk);
+ chunk = temp;
+ temp = NULL;
+ }
+ rdcc->nmisses++;
+ } else {
+ /*
+ * The chunk doesn't exist in the file. Assume all zeros.
+ */
+ HDmemset (chunk, 0, chunk_size);
+ rdcc->ninits++;
+ }
+ }
+
+ if (found<0 && chunk_size<=f->shared->access_parms->rdcc_nbytes) {
+ /*
+ * Add the chunk to the beginning of the cache after pruning the cache
+ * to make room.
+ */
+ if (H5F_istore_prune (f, chunk_size)<0) {
+ H5E_clear ();
+ }
+ if (rdcc->nused>=rdcc->nslots) {
+ rdcc->nslots = MAX (25, 2*rdcc->nslots);
+ rdcc->slot = H5MM_xrealloc (rdcc->slot,
+ (rdcc->nslots*
+ sizeof(H5F_rdcc_ent_t)));
+ }
+ HDmemmove (rdcc->slot+1, rdcc->slot,
+ rdcc->nused*sizeof(H5F_rdcc_ent_t));
+ rdcc->nused++;
+ rdcc->nbytes += chunk_size;
+ ent = rdcc->slot;
+ ent->locked = 0;
+ ent->dirty = FALSE;
+ ent->chunk_size = chunk_size;
+ ent->layout = H5O_copy (H5O_LAYOUT, layout);
+ ent->comp = H5O_copy (H5O_COMPRESS, comp);
+ for (i=0; i<layout->ndims; i++) {
+ ent->offset[i] = offset[i];
+ }
+ ent->rd_count = chunk_size;
+ ent->wr_count = chunk_size;
+ ent->chunk = chunk;
+ found = 0;
+
+ } else if (found<0) {
+ /*
+ * The chunk is larger than the entire cache so we don't cache it.
+ * This is the reason all those arguments have to be repeated for the
+ * unlock function.
+ */
+ ent = NULL;
+ found = -999;
+
+ } else if (found>0) {
+ /*
+ * The chunk is not at the beginning of the cache; move it forward by
+ * one slot. This is how we implement the LRU preemption algorithm.
+ */
+ H5F_rdcc_ent_t x = rdcc->slot[found];
+ rdcc->slot[found] = rdcc->slot[found-1];
+ rdcc->slot[found-1] = x;
+ ent = rdcc->slot + --found;
+ }
+
+ /* Lock the chunk into the cache */
+ if (ent) {
+ assert (!ent->locked);
+ ent->locked = TRUE;
+ if (idx_hint) *idx_hint = found;
+ chunk = ent->chunk;
+ }
+
+ ret_value = chunk;
+ done:
+ if (!ret_value) H5MM_xfree (chunk);
+ H5MM_xfree (temp);
+ FUNC_LEAVE (ret_value);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_unlock
+ *
+ * Purpose: Unlocks a previously locked chunk. The LAYOUT, COMP, and
+ *		OFFSET arguments should be the same as for H5F_istore_lock().
+ * The DIRTY argument should be set to non-zero if the chunk has
+ * been modified since it was locked. The IDX_HINT argument is
+ * the returned index hint from the lock operation and BUF is
+ * the return value from the lock.
+ *
+ * The NACCESSED argument should be the number of bytes accessed
+ * for reading or writing (depending on the value of DIRTY).
+ *		Its only purpose is to provide additional information to the
+ * preemption policy.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
+ const H5O_compress_t *comp, hbool_t dirty,
+ const hssize_t offset[], intn *idx_hint,
+ uint8 *chunk, size_t naccessed)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent = NULL;
+ intn i, found = -1;
+
+ FUNC_ENTER (H5F_istore_unlock, FAIL);
+
+ /* First look at the hint */
+ if (idx_hint && *idx_hint>=0 && *idx_hint<rdcc->nused) {
+ if (rdcc->slot[*idx_hint].chunk==chunk) found = *idx_hint;
+ }
+
+ /* Then look at all the entries */
+ for (i=0; found<0 && i<rdcc->nused; i++) {
+ if (rdcc->slot[i].chunk==chunk) found = i;
+ }
+
+ if (found<0) {
+ /*
+ * It's not in the cache, probably because it's too big. If it's
+ * dirty then flush it to disk. In any case, free the chunk.
+ * Note: we have to copy the layout and compression messages so we
+ * don't discard the `const' qualifier.
+ */
+ if (dirty) {
+ H5F_rdcc_ent_t x;
+ HDmemset (&x, 0, sizeof x);
+ x.dirty = TRUE;
+ x.layout = H5O_copy (H5O_LAYOUT, layout);
+ x.comp = H5O_copy (H5O_COMPRESS, comp);
+ for (i=0; i<layout->ndims; i++) {
+ x.offset[i] = offset[i];
+ }
+ x.chunk = chunk;
+ H5F_istore_flush_entry (f, &x);
+ H5O_free (H5O_LAYOUT, x.layout);
+ H5O_free (H5O_COMPRESS, x.comp);
+ }
+ H5MM_xfree (chunk);
+ } else {
+ /*
+ * It's in the cache so unlock it.
+ */
+ ent = rdcc->slot + found;
+ assert (ent->locked);
+ if (dirty) {
+ ent->dirty = TRUE;
+ ent->wr_count -= MIN (ent->wr_count, naccessed);
+ } else {
+ ent->rd_count -= MIN (ent->rd_count, naccessed);
+ }
+ ent->locked = FALSE;
+ }
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
* Function: H5F_istore_read
*
* Purpose: Reads a multi-dimensional buffer from (part of) an indexed raw
@@ -629,18 +1232,17 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
{
hssize_t offset_m[H5O_LAYOUT_NDIMS];
hsize_t size_m[H5O_LAYOUT_NDIMS];
- intn i, carry;
hsize_t idx_cur[H5O_LAYOUT_NDIMS];
hsize_t idx_min[H5O_LAYOUT_NDIMS];
hsize_t idx_max[H5O_LAYOUT_NDIMS];
hsize_t sub_size[H5O_LAYOUT_NDIMS];
hssize_t offset_wrt_chunk[H5O_LAYOUT_NDIMS];
hssize_t sub_offset_m[H5O_LAYOUT_NDIMS];
- size_t chunk_size;
- uint8 *chunk=NULL, *compressed=NULL;
- H5F_istore_ud1_t udata;
- herr_t status;
- herr_t ret_value = FAIL;
+ hssize_t chunk_offset[H5O_LAYOUT_NDIMS];
+ intn i, carry;
+ size_t naccessed; /*bytes accessed in chnk*/
+ uint8 *chunk=NULL; /*ptr to a chunk buffer */
+ intn idx_hint=0; /*cache index hint */
FUNC_ENTER(H5F_istore_read, FAIL);
@@ -672,61 +1274,10 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
}
#endif
- /* Determine the chunk size and allocate buffers */
- for (i=0, chunk_size=1; i<layout->ndims; i++) {
- chunk_size *= layout->dim[i];
- }
- chunk = H5MM_xmalloc(chunk_size);
- if (comp && H5Z_NONE!=comp->method) {
- compressed = H5MM_xmalloc (chunk_size);
- }
-
- /*
- * As a special case if the source is aligned on a chunk boundary and is
- * the same size as a chunk, and the destination is the same size as a
- * chunk, then instead of reading into a temporary buffer and then into
- * the destination, we read directly into the destination.
- */
- for (i=0; i<layout->ndims; i++) {
- if (offset_f[i] % layout->dim[i]) break; /*src not aligned*/
- if (size[i]!=layout->dim[i]) break; /*src not a chunk*/
- if (size_m[i]!=layout->dim[i]) break; /*dst not a chunk*/
- udata.key.offset[i] = offset_f[i];
- }
- if (i==layout->ndims) {
- udata.mesg = *layout;
- H5F_addr_undef (&(udata.addr));
- status = H5B_find (f, H5B_ISTORE, &(layout->addr), &udata);
- if (status>=0 && H5F_addr_defined (&(udata.addr))) {
- if (compressed && udata.key.nbytes<chunk_size) {
- if (H5F_block_read (f, &(udata.addr), udata.key.nbytes,
- compressed)<0) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
- compressed, chunk_size, buf)) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to uncompress raw storage chunk");
- }
- } else {
- assert (udata.key.nbytes==chunk_size);
- if (H5F_block_read (f, &(udata.addr), chunk_size, buf)<0) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- }
- } else {
- HDmemset (buf, 0, chunk_size);
- }
- HGOTO_DONE (SUCCEED);
- }
-
/*
- * This is the general case. We set up multi-dimensional counters
- * (idx_min, idx_max, and idx_cur) and loop through the chunks compressing
- * or copying each chunk into a temporary buffer, and then copying it to
- * it's destination.
+ * Set up multi-dimensional counters (idx_min, idx_max, and idx_cur) and
+ * loop through the chunks copying each to its final destination in the
+ * application buffer.
*/
for (i=0; i<layout->ndims; i++) {
idx_min[i] = offset_f[i] / layout->dim[i];
@@ -734,59 +1285,42 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
idx_cur[i] = idx_min[i];
}
- /* Initialize non-changing part of udata */
- udata.mesg = *layout;
-
/* Loop over all chunks */
while (1) {
-
- for (i=0; i<layout->ndims; i++) {
+ for (i=0, naccessed=1; i<layout->ndims; i++) {
/* The location and size of the chunk being accessed */
assert (layout->dim[i] < MAX_HSSIZET);
- udata.key.offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
+ chunk_offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
/* The offset and size wrt the chunk */
- offset_wrt_chunk[i] = MAX(offset_f[i], udata.key.offset[i]) -
- udata.key.offset[i];
+ offset_wrt_chunk[i] = MAX(offset_f[i], chunk_offset[i]) -
+ chunk_offset[i];
sub_size[i] = MIN((idx_cur[i]+1)*layout->dim[i],
offset_f[i]+size[i]) -
- (udata.key.offset[i] + offset_wrt_chunk[i]);
+ (chunk_offset[i] + offset_wrt_chunk[i]);
+ naccessed *= sub_size[i];
/* Offset into mem buffer */
- sub_offset_m[i] = udata.key.offset[i] + offset_wrt_chunk[i] +
+ sub_offset_m[i] = chunk_offset[i] + offset_wrt_chunk[i] +
offset_m[i] - offset_f[i];
}
- /* Read chunk */
- H5F_addr_undef(&(udata.addr));
- status = H5B_find(f, H5B_ISTORE, &(layout->addr), &udata);
- if (status>=0 && H5F_addr_defined(&(udata.addr))) {
- if (compressed && udata.key.nbytes<chunk_size) {
- if (H5F_block_read (f, &(udata.addr), udata.key.nbytes,
- compressed)<0) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
- compressed, chunk_size,
- chunk)) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to uncompress data");
- }
- } else {
- assert (udata.key.nbytes == chunk_size);
- if (H5F_block_read(f, &(udata.addr), chunk_size, chunk) < 0) {
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- }
- } else {
- HDmemset(chunk, 0, chunk_size);
+ /*
+ * Lock the chunk, transfer data to the application, then unlock the
+ * chunk.
+ */
+ if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
+ FALSE, &idx_hint))) {
+ HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
+ "unable to read raw data chunk");
}
-
- /* Transfer data from the chunk buffer to the application */
H5V_hyper_copy(layout->ndims, sub_size, size_m, sub_offset_m,
- (void *)buf, layout->dim, offset_wrt_chunk, chunk);
+ (void*)buf, layout->dim, offset_wrt_chunk, chunk);
+ if (H5F_istore_unlock (f, layout, comp, FALSE, chunk_offset, &idx_hint,
+ chunk, naccessed)<0) {
+ HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
+ "unable to unlock raw data chunk");
+ }
/* Increment indices */
for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) {
@@ -795,12 +1329,7 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
}
if (carry) break;
}
- ret_value = SUCCEED;
-
- done:
- H5MM_xfree(chunk);
- H5MM_xfree (compressed);
- FUNC_LEAVE(ret_value);
+ FUNC_LEAVE(SUCCEED);
}
@@ -833,12 +1362,12 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
hsize_t idx_min[H5O_LAYOUT_NDIMS];
hsize_t idx_max[H5O_LAYOUT_NDIMS];
hsize_t sub_size[H5O_LAYOUT_NDIMS];
+ hssize_t chunk_offset[H5O_LAYOUT_NDIMS];
hssize_t offset_wrt_chunk[H5O_LAYOUT_NDIMS];
hssize_t sub_offset_m[H5O_LAYOUT_NDIMS];
- hsize_t chunk_size, nbytes;
- uint8 *chunk=NULL, *compressed=NULL, *outbuf;
- H5F_istore_ud1_t udata;
- herr_t ret_value = FAIL;
+ uint8 *chunk=NULL;
+ intn idx_hint=0;
+ size_t chunk_size, naccessed;
FUNC_ENTER(H5F_istore_write, FAIL);
@@ -855,9 +1384,10 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
* For now the source must not be a hyperslab. It must be an entire
* memory buffer.
*/
- for (i=0; i<layout->ndims; i++) {
+ for (i=0, chunk_size=1; i<layout->ndims; i++) {
offset_m[i] = 0;
size_m[i] = size[i];
+ chunk_size *= layout->dim[i];
}
#ifndef NDEBUG
@@ -871,10 +1401,9 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
#endif
/*
- * This is the general case. We set up multi-dimensional counters
- * (idx_min, idx_max, and idx_cur) and loop through the chunks copying
- * each chunk into a temporary buffer, compressing or decompressing, and
- * then copying it to it's destination.
+ * Set up multi-dimensional counters (idx_min, idx_max, and idx_cur) and
+ * loop through the chunks copying each chunk from the application to the
+ * chunk cache.
*/
for (i=0; i<layout->ndims; i++) {
idx_min[i] = offset_f[i] / layout->dim[i];
@@ -882,102 +1411,46 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
idx_cur[i] = idx_min[i];
}
- /* Allocate buffers */
- for (i=0, chunk_size=1; i<layout->ndims; i++) {
- chunk_size *= layout->dim[i];
- }
- chunk = H5MM_xmalloc(chunk_size);
- if (comp && H5Z_NONE!=comp->method) {
- compressed = H5MM_xmalloc (chunk_size);
- }
-
- /* Initialize non-changing part of udata */
- udata.mesg = *layout;
/* Loop over all chunks */
while (1) {
-
- for (i=0; i<layout->ndims; i++) {
+
+ for (i=0, naccessed=1; i<layout->ndims; i++) {
/* The location and size of the chunk being accessed */
assert (layout->dim[i] < MAX_HSSIZET);
- udata.key.offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
+ chunk_offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
/* The offset and size wrt the chunk */
- offset_wrt_chunk[i] = MAX(offset_f[i], udata.key.offset[i]) -
- udata.key.offset[i];
+ offset_wrt_chunk[i] = MAX(offset_f[i], chunk_offset[i]) -
+ chunk_offset[i];
sub_size[i] = MIN((idx_cur[i]+1)*layout->dim[i],
offset_f[i]+size[i]) -
- (udata.key.offset[i] + offset_wrt_chunk[i]);
+ (chunk_offset[i] + offset_wrt_chunk[i]);
+ naccessed *= sub_size[i];
/* Offset into mem buffer */
- sub_offset_m[i] = udata.key.offset[i] + offset_wrt_chunk[i] +
+ sub_offset_m[i] = chunk_offset[i] + offset_wrt_chunk[i] +
offset_m[i] - offset_f[i];
}
-
+
/*
- * If we are writing a partial chunk then load the chunk from disk
- * and uncompress it if it exists.
+ * Lock the chunk, copy from application to chunk, then unlock the
+ * chunk.
*/
- if (!H5V_vector_zerop_s(layout->ndims, offset_wrt_chunk) ||
- !H5V_vector_eq_u(layout->ndims, sub_size, layout->dim)) {
- if (H5B_find (f, H5B_ISTORE, &(layout->addr), &udata)>=0 &&
- H5F_addr_defined (&(udata.addr))) {
-
- if (compressed && udata.key.nbytes<chunk_size) {
- if (H5F_block_read(f, &(udata.addr), udata.key.nbytes,
- compressed)<0) {
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
- compressed, chunk_size,
- chunk)) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to uncompress data");
- }
- } else {
- assert (chunk_size==udata.key.nbytes);
- if (H5F_block_read(f, &(udata.addr), udata.key.nbytes,
- chunk)<0) {
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- }
- } else {
- HDmemset(chunk, 0, chunk_size);
- }
+ if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
+ naccessed==chunk_size, &idx_hint))) {
+ HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+ "unable to read raw data chunk");
}
-
- /* Transfer data to the chunk */
H5V_hyper_copy(layout->ndims, sub_size,
layout->dim, offset_wrt_chunk, chunk,
size_m, sub_offset_m, buf);
-
- /* Compress the chunk */
- if (compressed &&
- (nbytes=H5Z_compress (comp, chunk_size, chunk, compressed)) &&
- nbytes<chunk_size) {
- outbuf = compressed;
- } else {
- outbuf = chunk;
- nbytes = chunk_size;
+ if (H5F_istore_unlock (f, layout, comp, TRUE, chunk_offset, &idx_hint,
+ chunk, naccessed)<0) {
+ HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+			   "unable to unlock raw data chunk");
}
- /*
- * Create the chunk it if it doesn't exist, or reallocate the chunk
- * if its size changed. Then write the data into the file.
- */
- H5F_addr_undef(&(udata.addr));
- udata.key.nbytes = nbytes;
- if (H5B_insert(f, H5B_ISTORE, &(layout->addr), &udata)<0) {
- HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
- "unable to allocate chunk");
- }
- if (H5F_block_write(f, &(udata.addr), nbytes, outbuf) < 0) {
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL,
- "unable to write raw storage chunk");
- }
-
/* Increment indices */
for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) {
if (++idx_cur[i]>=idx_max[i]) idx_cur[i] = idx_min[i];
@@ -985,12 +1458,8 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
}
if (carry) break;
}
- ret_value = SUCCEED;
- done:
- H5MM_xfree(chunk);
- H5MM_xfree (compressed);
- FUNC_LEAVE(ret_value);
+ FUNC_LEAVE(SUCCEED);
}
@@ -1046,6 +1515,61 @@ H5F_istore_create(H5F_t *f, H5O_layout_t *layout /*out */ )
/*-------------------------------------------------------------------------
+ * Function: H5F_istore_stats
+ *
+ * Purpose: Print raw data cache statistics to the stderr stream. If
+ * HEADERS is non-zero then print table column headers,
+ * otherwise assume that the H5AC layer has already printed them.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_stats (H5F_t *f, hbool_t headers)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ double miss_rate;
+ char ascii[32];
+
+ FUNC_ENTER (H5F_istore_stats, FAIL);
+
+ if (headers) {
+ fprintf(stderr, "H5F: raw data cache statistics for file %s\n",
+ f->name);
+ fprintf(stderr, " %-18s %8s %8s %8s %8s+%-8s\n",
+ "Layer", "Hits", "Misses", "MissRate", "Inits", "Flushes");
+ fprintf(stderr, " %-18s %8s %8s %8s %8s-%-8s\n",
+ "-----", "----", "------", "--------", "-----", "-------");
+ }
+
+ if (rdcc->nhits>0 || rdcc->nmisses>0) {
+ miss_rate = 100.0 * rdcc->nmisses /
+ (rdcc->nhits + rdcc->nmisses);
+ } else {
+ miss_rate = 0.0;
+ }
+ if (miss_rate > 100) {
+ sprintf(ascii, "%7d%%", (int) (miss_rate + 0.5));
+ } else {
+ sprintf(ascii, "%7.2f%%", miss_rate);
+ }
+
+ fprintf(stderr, " %-18s %8u %8u %7s %8d+%-9ld\n",
+ "raw data chunks", rdcc->nhits, rdcc->nmisses, ascii,
+ rdcc->ninits, (long)(rdcc->nflushes)-(long)(rdcc->ninits));
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
* Function: H5F_istore_debug
*
* Purpose: Debugs a B-tree node for indexed raw data storage.
diff --git a/src/H5F.c b/src/H5F.c
index 972ffc9..78d70b3 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -97,7 +97,8 @@ static intn interface_initialize_g = FALSE;
static void H5F_term_interface(void);
/* PRIVATE PROTOTYPES */
-static H5F_t *H5F_new(H5F_file_t *shared);
+static H5F_t *H5F_new(H5F_file_t *shared, const H5F_create_t *fcpl,
+ const H5F_access_t *fapl);
static herr_t H5F_dest(H5F_t *f);
static herr_t H5F_flush(H5F_t *f, hbool_t invalidate);
static herr_t H5F_locate_signature(H5F_low_t *f_handle,
@@ -141,6 +142,9 @@ H5F_init_interface(void)
}
/* Initialize the default file access property list */
+ H5F_access_dflt.mdc_nelmts = H5AC_NSLOTS;
+ H5F_access_dflt.rdcc_nbytes = 1024*1024; /*1MB*/
+ H5F_access_dflt.rdcc_w0 = 0.75; /*preempt fully read chunks*/
H5F_access_dflt.driver = H5F_LOW_DFLT;
#if (H5F_LOW_DFLT == H5F_LOW_SEC2)
/* Nothing to initialize */
@@ -468,7 +472,8 @@ H5Fis_hdf5(const char *filename)
* H5Fopen and H5Fcreate functions then fill in various
* fields. If SHARED is a non-null pointer then the shared info
* to which it points has the reference count incremented.
- * Otherwise a new, empty shared info struct is created.
+ * Otherwise a new, empty shared info struct is created and
+ * initialized with the specified file access property list.
*
* Errors:
*
@@ -485,9 +490,11 @@ H5Fis_hdf5(const char *filename)
*-------------------------------------------------------------------------
*/
static H5F_t *
-H5F_new(H5F_file_t *shared)
+H5F_new(H5F_file_t *shared, const H5F_create_t *fcpl, const H5F_access_t *fapl)
{
- H5F_t *f = NULL;
+ H5F_t *f = NULL;
+ intn n;
+
FUNC_ENTER(H5F_new, NULL);
f = H5MM_xcalloc(1, sizeof(H5F_t));
@@ -500,13 +507,39 @@ H5F_new(H5F_file_t *shared)
H5F_addr_undef(&(f->shared->freespace_addr));
H5F_addr_undef(&(f->shared->hdf5_eof));
- /* Create a main cache */
- H5AC_create(f, H5AC_NSLOTS);
+ /*
+ * Deep-copy the file creation and file access property lists into
+ * the new file handle. We do this early because some values might
+ * need to change as the file is being opened.
+ */
+ if (NULL==(f->shared->create_parms=H5P_copy(H5P_FILE_CREATE, fcpl))) {
+ HRETURN_ERROR (H5E_FILE, H5E_CANTINIT, NULL,
+ "unable to copy file creation property list");
+ }
+ if (NULL==(f->shared->access_parms=H5P_copy(H5P_FILE_ACCESS, fapl))) {
+ HRETURN_ERROR (H5E_FILE, H5E_CANTINIT, NULL,
+ "unable to copy file access property list");
+ }
+
+ /*
+ * Create a meta data cache with the specified number of elements.
+ * The cache might be created with a different number of elements and
+ * the access property list should be updated to reflect that.
+ */
+ if ((n=H5AC_create(f, f->shared->access_parms->mdc_nelmts))<0) {
+ HRETURN_ERROR (H5E_FILE, H5E_CANTINIT, NULL,
+ "unable to create meta data cache");
+ }
+ f->shared->access_parms->mdc_nelmts = n;
+
+ /* Create the chunk cache */
+ H5F_istore_init (f);
}
f->shared->nrefs++;
FUNC_LEAVE(f);
}
+
/*-------------------------------------------------------------------------
* Function: H5F_dest
@@ -547,6 +580,10 @@ H5F_dest(H5F_t *f)
HERROR (H5E_FILE, H5E_CANTINIT, "problems closing file");
ret_value = FAIL; /*but keep going*/
}
+ if (H5F_istore_dest (f)<0) {
+ HERROR (H5E_FILE, H5E_CANTINIT, "problems closing file");
+ ret_value = FAIL; /*but keep going*/
+ }
f->shared->cwfs = H5MM_xfree (f->shared->cwfs);
H5P_close (H5P_FILE_CREATE, f->shared->create_parms);
H5P_close (H5P_FILE_ACCESS, f->shared->access_parms);
@@ -735,7 +772,7 @@ H5F_open(const char *name, uintn flags,
old->shared->flags |= H5F_ACC_RDWR;
fd = NULL; /*so we don't close it during error */
}
- f = H5F_new(old->shared);
+ f = H5F_new(old->shared, NULL, NULL);
} else if (flags & H5F_ACC_TRUNC) {
/* Truncate existing file */
@@ -749,7 +786,7 @@ H5F_open(const char *name, uintn flags,
HRETURN_ERROR(H5E_FILE, H5E_CANTCREATE, NULL,
"can't truncate file");
}
- f = H5F_new(NULL);
+ f = H5F_new(NULL, create_parms, access_parms);
f->shared->key = search;
f->shared->flags = flags;
f->shared->lf = fd;
@@ -762,7 +799,7 @@ H5F_open(const char *name, uintn flags,
HRETURN_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL,
"cannot open existing file");
}
- f = H5F_new(NULL);
+ f = H5F_new(NULL, create_parms, access_parms);
f->shared->key = search;
f->shared->flags = flags;
f->shared->lf = fd;
@@ -794,7 +831,7 @@ H5F_open(const char *name, uintn flags,
HRETURN_ERROR(H5E_FILE, H5E_CANTCREATE, NULL,
"can't create file");
}
- f = H5F_new(NULL);
+ f = H5F_new(NULL, create_parms, access_parms);
f->shared->key = search;
f->shared->flags = flags;
f->shared->lf = fd;
@@ -816,13 +853,10 @@ H5F_open(const char *name, uintn flags,
f->name = H5MM_xstrdup(name);
/*
- * Update the file creation parameters and file access parameters with
- * default values if this is the first time this file is opened. Some of
- * the properties may need to be updated.
+ * Some of the properties may need to be updated. We would like to
+ * eventually get rid of this step by not having redundant data!
*/
if (1 == f->shared->nrefs) {
- f->shared->create_parms = H5P_copy (H5P_FILE_CREATE, create_parms);
- f->shared->access_parms = H5P_copy (H5P_FILE_ACCESS, access_parms);
if (H5F_LOW_FAMILY==f->shared->access_parms->driver) {
haddr_t x = f->shared->lf->u.fam.memb_size;
f->shared->access_parms->u.fam.memb_size = x;
@@ -1255,12 +1289,20 @@ H5F_flush(H5F_t *f, hbool_t invalidate)
* once for read-only and once for read-write, and then calling
* H5F_flush() with the read-only handle, still causes data to be flushed.
*/
- if (0 == (H5F_ACC_RDWR & f->shared->flags))
+ if (0 == (H5F_ACC_RDWR & f->shared->flags)) {
HRETURN(SUCCEED);
+ }
- /* flush (and invalidate) the entire cache */
+ /* flush the entire raw data cache */
+ if (H5F_istore_flush (f)<0) {
+ HRETURN_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "unable to flush raw data cache");
+ }
+
+ /* flush (and invalidate) the entire meta data cache */
if (H5AC_flush(f, NULL, 0, invalidate) < 0) {
- HRETURN_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache");
+ HRETURN_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "unable to flush meta data cache");
}
/* encode the file boot block */
@@ -1371,7 +1413,10 @@ H5F_close(H5F_t *f)
}
/* Dump debugging info */
- if (f->intent & H5F_ACC_DEBUG) H5AC_debug(f);
+ if (f->intent & H5F_ACC_DEBUG) {
+ H5AC_debug(f);
+ H5F_istore_stats (f, FALSE);
+ }
/* Close files and release resources */
H5F_low_close(f->shared->lf, f->shared->access_parms);
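For context, the H5F_ACC_DEBUG branch above now dumps both the meta data cache (H5AC_debug) and the raw data chunk cache (H5F_istore_stats) when a file handle is closed. A hypothetical application-side trigger, assuming an existing file name and default access properties:

    hid_t file = H5Fopen("example.h5", H5F_ACC_RDWR | H5F_ACC_DEBUG, H5P_DEFAULT);
    /* ... chunked dataset I/O ... */
    H5Fclose(file);   /* cache statistics tables are printed to stderr here */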
diff --git a/src/H5Fistore.c b/src/H5Fistore.c
index 7e80f1a..91c4e9d 100644
--- a/src/H5Fistore.c
+++ b/src/H5Fistore.c
@@ -2,8 +2,17 @@
* Copyright (C) 1997 NCSA
* All rights reserved.
*
- * Programmer: Robb Matzke <matzke@llnl.gov>
- * Wednesday, October 8, 1997
+ * Programmer: Robb Matzke <matzke@llnl.gov>
+ * Wednesday, October 8, 1997
+ *
+ * Purpose: Indexed (chunked) I/O functions. The logical
+ * multi-dimensional data space is regularly partitioned into
+ * same-sized "chunks", the first of which is aligned with the
+ * logical origin. The chunks are given a multi-dimensional
+ * index which is used as a lookup key in a B-tree that maps
+ * chunk index to disk address. Each chunk can be compressed
+ * independently and the chunks may move around in the file as
+ * their storage requirements change.
*/
#include <H5private.h>
#include <H5Dprivate.h>
@@ -14,13 +23,25 @@
#include <H5Oprivate.h>
#include <H5Vprivate.h>
-
/* Interface initialization */
#define PABLO_MASK H5F_istore_mask
static hbool_t interface_initialize_g = FALSE;
#define INTERFACE_INIT NULL
-/* PRIVATE PROTOTYPES */
+/* Raw data chunks are cached. Each entry in the cache is: */
+typedef struct H5F_rdcc_ent_t {
+ hbool_t locked; /*entry is locked in cache */
+ hbool_t dirty; /*needs to be written to disk? */
+ H5O_layout_t *layout; /*the layout message */
+ H5O_compress_t *comp; /*compression message */
+ hssize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
+ size_t chunk_size; /*size of a chunk */
+ size_t rd_count; /*bytes remaining to be read */
+ size_t wr_count; /*bytes remaining to be written */
+ uint8 *chunk; /*the uncompressed chunk data */
+} H5F_rdcc_ent_t;
+
+/* Private prototypes */
static size_t H5F_istore_sizeof_rkey(H5F_t *f, const void *_udata);
static herr_t H5F_istore_new_node(H5F_t *f, H5B_ins_t, void *_lt_key,
void *_udata, void *_rt_key, haddr_t *);
@@ -546,7 +567,6 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
* Already exists. If the new size is not the same as the old size
* then we should reallocate storage.
*/
-#if 1
if (lt_key->nbytes != udata->key.nbytes) {
if (H5MF_realloc (f, H5MF_RAW, lt_key->nbytes, addr,
udata->key.nbytes, new_node/*out*/)<0) {
@@ -561,13 +581,6 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
udata->addr = *addr;
ret_value = H5B_INS_NOOP;
}
-#else
- assert (lt_key->nbytes == udata->key.nbytes);
- assert (!H5F_addr_defined (&(udata->addr)) ||
- H5F_addr_eq (&(udata->addr), addr));
- udata->addr = *addr;
- ret_value = H5B_INS_NOOP;
-#endif
} else if (H5V_hyper_disjointp(udata->mesg.ndims,
lt_key->offset, udata->mesg.dim,
@@ -606,6 +619,596 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
/*-------------------------------------------------------------------------
+ * Function: H5F_istore_init
+ *
+ * Purpose: Initialize the raw data chunk cache for a file. This is
+ * called when the file handle is initialized.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Monday, May 18, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_init (H5F_t *f)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+
+ FUNC_ENTER (H5F_istore_init, FAIL);
+
+ HDmemset (rdcc, 0, sizeof(H5F_rdcc_t));
+ if (f->shared->access_parms->rdcc_nbytes>0) {
+ rdcc->nslots = 25; /*some initial number of slots*/
+ rdcc->slot = H5MM_xcalloc (rdcc->nslots, sizeof(H5F_rdcc_ent_t));
+ }
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_flush_entry
+ *
+ * Purpose: Writes a chunk to disk.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_flush_entry (H5F_t *f, H5F_rdcc_ent_t *ent)
+{
+ void *c_buf = NULL; /*temp compression buffer */
+ void *out_ptr = NULL; /*ptr to output buffer */
+ size_t nbytes; /*size of output buffer */
+ herr_t ret_value = FAIL; /*return value */
+ H5F_istore_ud1_t udata; /*pass through B-tree */
+ intn i;
+
+ FUNC_ENTER (H5F_istore_flush_entry, FAIL);
+ assert (ent);
+ assert (!ent->locked);
+ if (!ent->dirty) HRETURN (SUCCEED);
+
+ /* Should the chunk be compressed before writing it to disk? */
+ if (ent->comp && H5Z_NONE!=ent->comp->method) {
+ c_buf = H5MM_xmalloc (ent->chunk_size);
+ nbytes = H5Z_compress (ent->comp, ent->chunk_size, ent->chunk, c_buf);
+ if (nbytes && nbytes<ent->chunk_size) {
+ out_ptr = c_buf;
+ } else {
+ out_ptr = ent->chunk;
+ nbytes = ent->chunk_size;
+ }
+ } else {
+ out_ptr = ent->chunk;
+ nbytes = ent->chunk_size;
+ }
+
+ /*
+     * Create the chunk if it doesn't exist, or reallocate the chunk if its
+ * size changed. Then write the data into the file.
+ */
+ udata.mesg = *(ent->layout);
+ H5F_addr_undef(&(udata.addr));
+ udata.key.nbytes = nbytes;
+ for (i=0; i<ent->layout->ndims; i++) {
+ udata.key.offset[i] = ent->offset[i];
+ }
+
+ if (H5B_insert(f, H5B_ISTORE, &(ent->layout->addr), &udata)<0) {
+ HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+ "unable to allocate chunk");
+ }
+ if (H5F_block_write (f, &(udata.addr), nbytes, out_ptr)<0) {
+ HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+ "unable to write raw data to file");
+ }
+
+ /* Mark cache entry as clean */
+ ent->dirty = FALSE;
+ f->shared->rdcc.nflushes++;
+ ret_value = SUCCEED;
+
+ done:
+ H5MM_xfree (c_buf);
+ FUNC_LEAVE (ret_value);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_flush
+ *
+ * Purpose: Writes all dirty chunks to disk but does not remove them from
+ * the cache.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_flush (H5F_t *f)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ intn i, nerrors=0;
+
+ FUNC_ENTER (H5F_istore_flush, FAIL);
+
+ for (i=0; i<rdcc->nused; i++) {
+ if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
+ nerrors++;
+ }
+ }
+ if (nerrors) {
+ HRETURN_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL,
+ "unable to flush one or more raw data chunks");
+ }
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_preempt
+ *
+ * Purpose: Preempts the specified entry from the cache, flushing it to
+ * disk if necessary.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_preempt (H5F_t *f, intn idx)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent = rdcc->slot + idx;
+
+ FUNC_ENTER (H5F_istore_preempt, FAIL);
+ assert (idx>=0 && idx<rdcc->nused);
+ assert (!ent->locked);
+
+ if (ent->dirty) H5F_istore_flush_entry (f, ent);
+ H5O_free (H5O_LAYOUT, ent->layout);
+ H5O_free (H5O_COMPRESS, ent->comp);
+ H5MM_xfree (ent->chunk);
+ rdcc->nused -= 1;
+ rdcc->nbytes -= ent->chunk_size;
+ HDmemmove (rdcc->slot+idx, rdcc->slot+idx+1,
+ (rdcc->nused-idx) * sizeof(H5F_rdcc_ent_t));
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_dest
+ *
+ * Purpose: Destroy the entire chunk cache by flushing dirty entries,
+ * preempting all entries, and freeing the cache itself.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_dest (H5F_t *f)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ intn i, nerrors=0;
+
+ FUNC_ENTER (H5F_istore_dest, FAIL);
+
+ for (i=rdcc->nused-1; i>=0; --i) {
+ if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
+ nerrors++;
+ }
+ if (H5F_istore_preempt (f, i)<0) {
+ nerrors++;
+ }
+ }
+ if (nerrors) {
+ HRETURN_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL,
+ "unable to flush one or more raw data chunks");
+ }
+
+ H5MM_xfree (rdcc->slot);
+ HDmemset (rdcc, 0, sizeof(H5F_rdcc_t));
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_prune
+ *
+ * Purpose: Prune the cache by preempting some things until the cache has
+ * room for something which is SIZE bytes. Only unlocked
+ * entries are considered for preemption.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_prune (H5F_t *f, size_t size)
+{
+ intn i, meth0, meth1, nerrors=0;
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent0, *ent1;
+ double w0 = f->shared->access_parms->rdcc_w0;
+ size_t total = f->shared->access_parms->rdcc_nbytes;
+
+ FUNC_ENTER (H5F_istore_prune, FAIL);
+
+ /*
+ * We have two pointers that slide down the cache beginning at the least
+ * recently used entry. The distance between the pointers represents the
+ * relative weight. A weight of 50% for the first pointer means that the
+ * second pointer is half the cache length behind the first pointer.
+ */
+ meth0 = rdcc->nused;
+ meth1 = rdcc->nused * (1.0+w0);
+ for (i=MAX(meth0, meth1)-1;
+ rdcc->nbytes+size>total && i>=0;
+ --i, --meth0, --meth1) {
+
+ ent0 = rdcc->slot+meth0; /*might be a bad pointer!*/
+ ent1 = rdcc->slot+meth1; /*might be a bad pointer!*/
+
+ if (meth0>=0 && meth0<rdcc->nused && !ent0->locked &&
+ (0==ent0->rd_count || ent0->chunk_size==ent0->rd_count) &&
+ (0==ent0->wr_count || ent0->chunk_size==ent0->wr_count)) {
+ /*
+ * Method 0: Preempt entries that have a zero rd_count. If the
+ * application is accessing a dataset with a set of
+ * non-overlapping partial I/O requests then chunks with a zero
+ * rd_count will probably not be accessed in the near future.
+ */
+ if (H5F_istore_preempt (f, meth0)<0) nerrors++;
+
+ } else if (meth1>=0 && meth1<rdcc->nused && !ent1->locked) {
+ /*
+ * Method 1: Discard the least recently used members from the
+ * cache. This is a catch-all.
+ */
+ if (H5F_istore_preempt (f, meth1)<0) nerrors++;
+ }
+ }
+ if (nerrors) {
+ HRETURN_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL,
+		       "unable to preempt one or more raw data cache entries");
+ }
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_lock
+ *
+ * Purpose: Return a pointer to an uncompressed chunk. The pointer
+ * points directly into the chunk cache and should not be freed
+ * by the caller but will be valid until it is unlocked. The
+ * input value IDX_HINT is used to speed up cache lookups and
+ *		its output value should be given to H5F_istore_unlock().
+ *
+ * If RELAX is non-zero and the chunk isn't in the cache then
+ * don't try to read it from the file, but just allocate an
+ *		uninitialized buffer to hold the result.  This is intended
+ * for output functions that are about to overwrite the entire
+ * chunk.
+ *
+ * Return: Success: Ptr to an uncompressed chunk.
+ *
+ * Failure: NULL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
+ const H5O_compress_t *comp, const hssize_t offset[],
+ hbool_t relax, intn *idx_hint/*in,out*/)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent = NULL;
+ intn i, j, found = -1;
+ H5F_istore_ud1_t udata; /*B-tree pass-through */
+ size_t chunk_size; /*size of a chunk */
+ herr_t status; /*func return status */
+ void *chunk=NULL; /*the uncompressed chunk*/
+ void *temp=NULL; /*temporary chunk buffer*/
+ void *ret_value=NULL; /*return value */
+
+ FUNC_ENTER (H5F_istore_lock, NULL);
+
+ /* First use the hint */
+ if (idx_hint && *idx_hint>=0 && *idx_hint<rdcc->nused) {
+ ent = rdcc->slot + *idx_hint;
+ if (layout->ndims==ent->layout->ndims) {
+ for (i=0, found=*idx_hint; found>=0 && i<ent->layout->ndims; i++) {
+ if (offset[i]!=ent->offset[i]) found = -1;
+ }
+ }
+ }
+
+ /* Then look at all the entries */
+ for (i=0; found<0 && i<rdcc->nused; i++) {
+ ent = rdcc->slot + i;
+ if (layout->ndims==ent->layout->ndims) {
+ for (j=0, found=i; found>=0 && j<ent->layout->ndims; j++) {
+ if (offset[j]!=ent->offset[j]) found = -1;
+ }
+ }
+ }
+
+
+ if (found>=0) {
+ /*
+ * Already in the cache. Count a hit.
+ */
+ rdcc->nhits++;
+
+ } else if (found<0 && relax) {
+ /*
+ * Not in the cache, but we're about to overwrite the whole thing
+ * anyway, so just allocate a buffer for it but don't initialize that
+ * buffer with the file contents. Count this as a hit instead of a
+ * miss because we saved ourselves lots of work.
+ */
+ rdcc->nhits++;
+ for (i=0, chunk_size=1; i<layout->ndims; i++) {
+ chunk_size *= layout->dim[i];
+ }
+ chunk = H5MM_xmalloc (chunk_size);
+
+ } else {
+ /*
+ * Not in the cache. Read it from the file and count this as a miss
+ * if it's in the file or an init if it isn't.
+ */
+ for (i=0, chunk_size=1; i<layout->ndims; i++) {
+ udata.key.offset[i] = offset[i];
+ chunk_size *= layout->dim[i];
+ }
+ udata.mesg = *layout;
+ H5F_addr_undef (&(udata.addr));
+ status = H5B_find (f, H5B_ISTORE, &(layout->addr), &udata);
+ chunk = H5MM_xmalloc (chunk_size);
+ if (status>=0 && H5F_addr_defined (&(udata.addr))) {
+ /*
+ * The chunk exists on disk but might be compressed. Instead of
+ * allocating the exact size for the compressed chunk we allocate
+ * the entire chunk size -- it reduces strain on the malloc()
+ * subsystem.
+ */
+ if (H5F_block_read (f, &(udata.addr), udata.key.nbytes, chunk)<0) {
+ HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
+ "unable to read raw data chunk");
+ }
+ if (udata.key.nbytes<chunk_size) {
+ temp = H5MM_xmalloc (chunk_size);
+ if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
+ chunk, chunk_size, temp)) {
+ HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
+ "unable to uncompress raw data chunk");
+ }
+ H5MM_xfree (chunk);
+ chunk = temp;
+ temp = NULL;
+ }
+ rdcc->nmisses++;
+ } else {
+ /*
+ * The chunk doesn't exist in the file. Assume all zeros.
+ */
+ HDmemset (chunk, 0, chunk_size);
+ rdcc->ninits++;
+ }
+ }
+
+ if (found<0 && chunk_size<=f->shared->access_parms->rdcc_nbytes) {
+ /*
+ * Add the chunk to the beginning of the cache after pruning the cache
+ * to make room.
+ */
+ if (H5F_istore_prune (f, chunk_size)<0) {
+ H5E_clear ();
+ }
+ if (rdcc->nused>=rdcc->nslots) {
+ rdcc->nslots = MAX (25, 2*rdcc->nslots);
+ rdcc->slot = H5MM_xrealloc (rdcc->slot,
+ (rdcc->nslots*
+ sizeof(H5F_rdcc_ent_t)));
+ }
+ HDmemmove (rdcc->slot+1, rdcc->slot,
+ rdcc->nused*sizeof(H5F_rdcc_ent_t));
+ rdcc->nused++;
+ rdcc->nbytes += chunk_size;
+ ent = rdcc->slot;
+ ent->locked = 0;
+ ent->dirty = FALSE;
+ ent->chunk_size = chunk_size;
+ ent->layout = H5O_copy (H5O_LAYOUT, layout);
+ ent->comp = H5O_copy (H5O_COMPRESS, comp);
+ for (i=0; i<layout->ndims; i++) {
+ ent->offset[i] = offset[i];
+ }
+ ent->rd_count = chunk_size;
+ ent->wr_count = chunk_size;
+ ent->chunk = chunk;
+ found = 0;
+
+ } else if (found<0) {
+ /*
+ * The chunk is larger than the entire cache so we don't cache it.
+ * This is the reason all those arguments have to be repeated for the
+ * unlock function.
+ */
+ ent = NULL;
+ found = -999;
+
+ } else if (found>0) {
+ /*
+ * The chunk is not at the beginning of the cache; move it forward by
+ * one slot. This is how we implement the LRU preemption algorithm.
+ */
+ H5F_rdcc_ent_t x = rdcc->slot[found];
+ rdcc->slot[found] = rdcc->slot[found-1];
+ rdcc->slot[found-1] = x;
+ ent = rdcc->slot + --found;
+ }
+
+ /* Lock the chunk into the cache */
+ if (ent) {
+ assert (!ent->locked);
+ ent->locked = TRUE;
+ if (idx_hint) *idx_hint = found;
+ chunk = ent->chunk;
+ }
+
+ ret_value = chunk;
+ done:
+ if (!ret_value) H5MM_xfree (chunk);
+ H5MM_xfree (temp);
+ FUNC_LEAVE (ret_value);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_istore_unlock
+ *
+ * Purpose: Unlocks a previously locked chunk. The LAYOUT, COMP, and
+ *		OFFSET arguments should be the same as for H5F_istore_lock().
+ * The DIRTY argument should be set to non-zero if the chunk has
+ * been modified since it was locked. The IDX_HINT argument is
+ * the returned index hint from the lock operation and BUF is
+ * the return value from the lock.
+ *
+ * The NACCESSED argument should be the number of bytes accessed
+ * for reading or writing (depending on the value of DIRTY).
+ *		Its only purpose is to provide additional information to the
+ * preemption policy.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
+ const H5O_compress_t *comp, hbool_t dirty,
+ const hssize_t offset[], intn *idx_hint,
+ uint8 *chunk, size_t naccessed)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ H5F_rdcc_ent_t *ent = NULL;
+ intn i, found = -1;
+
+ FUNC_ENTER (H5F_istore_unlock, FAIL);
+
+ /* First look at the hint */
+ if (idx_hint && *idx_hint>=0 && *idx_hint<rdcc->nused) {
+ if (rdcc->slot[*idx_hint].chunk==chunk) found = *idx_hint;
+ }
+
+ /* Then look at all the entries */
+ for (i=0; found<0 && i<rdcc->nused; i++) {
+ if (rdcc->slot[i].chunk==chunk) found = i;
+ }
+
+ if (found<0) {
+ /*
+ * It's not in the cache, probably because it's too big. If it's
+ * dirty then flush it to disk. In any case, free the chunk.
+ * Note: we have to copy the layout and compression messages so we
+ * don't discard the `const' qualifier.
+ */
+ if (dirty) {
+ H5F_rdcc_ent_t x;
+ HDmemset (&x, 0, sizeof x);
+ x.dirty = TRUE;
+ x.layout = H5O_copy (H5O_LAYOUT, layout);
+ x.comp = H5O_copy (H5O_COMPRESS, comp);
+ for (i=0; i<layout->ndims; i++) {
+ x.offset[i] = offset[i];
+ }
+ x.chunk = chunk;
+ H5F_istore_flush_entry (f, &x);
+ H5O_free (H5O_LAYOUT, x.layout);
+ H5O_free (H5O_COMPRESS, x.comp);
+ }
+ H5MM_xfree (chunk);
+ } else {
+ /*
+ * It's in the cache so unlock it.
+ */
+ ent = rdcc->slot + found;
+ assert (ent->locked);
+ if (dirty) {
+ ent->dirty = TRUE;
+ ent->wr_count -= MIN (ent->wr_count, naccessed);
+ } else {
+ ent->rd_count -= MIN (ent->rd_count, naccessed);
+ }
+ ent->locked = FALSE;
+ }
+
+ FUNC_LEAVE (SUCCEED);
+}
+
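A minimal caller sketch of the lock/unlock pair (illustrative only; it mirrors the pattern used by H5F_istore_write() below and assumes `f', `layout', `comp', `buf', and a chunk-aligned `chunk_offset[]' are set up elsewhere; RELAX is TRUE because the entire chunk is overwritten):

    intn    i, idx_hint = 0;
    size_t  chunk_size;
    uint8   *chunk = NULL;

    for (i=0, chunk_size=1; i<layout->ndims; i++) chunk_size *= layout->dim[i];
    if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
                                      TRUE, &idx_hint))) {
        /* unable to lock the chunk */
    }
    HDmemcpy (chunk, buf, chunk_size);      /* overwrite the whole chunk */
    if (H5F_istore_unlock (f, layout, comp, TRUE, chunk_offset, &idx_hint,
                           chunk, chunk_size)<0) {
        /* unable to unlock the chunk */
    }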
+
+/*-------------------------------------------------------------------------
* Function: H5F_istore_read
*
* Purpose: Reads a multi-dimensional buffer from (part of) an indexed raw
@@ -629,18 +1232,17 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
{
hssize_t offset_m[H5O_LAYOUT_NDIMS];
hsize_t size_m[H5O_LAYOUT_NDIMS];
- intn i, carry;
hsize_t idx_cur[H5O_LAYOUT_NDIMS];
hsize_t idx_min[H5O_LAYOUT_NDIMS];
hsize_t idx_max[H5O_LAYOUT_NDIMS];
hsize_t sub_size[H5O_LAYOUT_NDIMS];
hssize_t offset_wrt_chunk[H5O_LAYOUT_NDIMS];
hssize_t sub_offset_m[H5O_LAYOUT_NDIMS];
- size_t chunk_size;
- uint8 *chunk=NULL, *compressed=NULL;
- H5F_istore_ud1_t udata;
- herr_t status;
- herr_t ret_value = FAIL;
+ hssize_t chunk_offset[H5O_LAYOUT_NDIMS];
+ intn i, carry;
+ size_t naccessed; /*bytes accessed in chnk*/
+ uint8 *chunk=NULL; /*ptr to a chunk buffer */
+ intn idx_hint=0; /*cache index hint */
FUNC_ENTER(H5F_istore_read, FAIL);
@@ -672,61 +1274,10 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
}
#endif
- /* Determine the chunk size and allocate buffers */
- for (i=0, chunk_size=1; i<layout->ndims; i++) {
- chunk_size *= layout->dim[i];
- }
- chunk = H5MM_xmalloc(chunk_size);
- if (comp && H5Z_NONE!=comp->method) {
- compressed = H5MM_xmalloc (chunk_size);
- }
-
- /*
- * As a special case if the source is aligned on a chunk boundary and is
- * the same size as a chunk, and the destination is the same size as a
- * chunk, then instead of reading into a temporary buffer and then into
- * the destination, we read directly into the destination.
- */
- for (i=0; i<layout->ndims; i++) {
- if (offset_f[i] % layout->dim[i]) break; /*src not aligned*/
- if (size[i]!=layout->dim[i]) break; /*src not a chunk*/
- if (size_m[i]!=layout->dim[i]) break; /*dst not a chunk*/
- udata.key.offset[i] = offset_f[i];
- }
- if (i==layout->ndims) {
- udata.mesg = *layout;
- H5F_addr_undef (&(udata.addr));
- status = H5B_find (f, H5B_ISTORE, &(layout->addr), &udata);
- if (status>=0 && H5F_addr_defined (&(udata.addr))) {
- if (compressed && udata.key.nbytes<chunk_size) {
- if (H5F_block_read (f, &(udata.addr), udata.key.nbytes,
- compressed)<0) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
- compressed, chunk_size, buf)) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to uncompress raw storage chunk");
- }
- } else {
- assert (udata.key.nbytes==chunk_size);
- if (H5F_block_read (f, &(udata.addr), chunk_size, buf)<0) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- }
- } else {
- HDmemset (buf, 0, chunk_size);
- }
- HGOTO_DONE (SUCCEED);
- }
-
/*
- * This is the general case. We set up multi-dimensional counters
- * (idx_min, idx_max, and idx_cur) and loop through the chunks compressing
- * or copying each chunk into a temporary buffer, and then copying it to
- * it's destination.
+ * Set up multi-dimensional counters (idx_min, idx_max, and idx_cur) and
+ * loop through the chunks copying each to its final destination in the
+ * application buffer.
*/
for (i=0; i<layout->ndims; i++) {
idx_min[i] = offset_f[i] / layout->dim[i];
@@ -734,59 +1285,42 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
idx_cur[i] = idx_min[i];
}
- /* Initialize non-changing part of udata */
- udata.mesg = *layout;
-
/* Loop over all chunks */
while (1) {
-
- for (i=0; i<layout->ndims; i++) {
+ for (i=0, naccessed=1; i<layout->ndims; i++) {
/* The location and size of the chunk being accessed */
assert (layout->dim[i] < MAX_HSSIZET);
- udata.key.offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
+ chunk_offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
/* The offset and size wrt the chunk */
- offset_wrt_chunk[i] = MAX(offset_f[i], udata.key.offset[i]) -
- udata.key.offset[i];
+ offset_wrt_chunk[i] = MAX(offset_f[i], chunk_offset[i]) -
+ chunk_offset[i];
sub_size[i] = MIN((idx_cur[i]+1)*layout->dim[i],
offset_f[i]+size[i]) -
- (udata.key.offset[i] + offset_wrt_chunk[i]);
+ (chunk_offset[i] + offset_wrt_chunk[i]);
+ naccessed *= sub_size[i];
/* Offset into mem buffer */
- sub_offset_m[i] = udata.key.offset[i] + offset_wrt_chunk[i] +
+ sub_offset_m[i] = chunk_offset[i] + offset_wrt_chunk[i] +
offset_m[i] - offset_f[i];
}
- /* Read chunk */
- H5F_addr_undef(&(udata.addr));
- status = H5B_find(f, H5B_ISTORE, &(layout->addr), &udata);
- if (status>=0 && H5F_addr_defined(&(udata.addr))) {
- if (compressed && udata.key.nbytes<chunk_size) {
- if (H5F_block_read (f, &(udata.addr), udata.key.nbytes,
- compressed)<0) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
- compressed, chunk_size,
- chunk)) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to uncompress data");
- }
- } else {
- assert (udata.key.nbytes == chunk_size);
- if (H5F_block_read(f, &(udata.addr), chunk_size, chunk) < 0) {
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- }
- } else {
- HDmemset(chunk, 0, chunk_size);
+ /*
+ * Lock the chunk, transfer data to the application, then unlock the
+ * chunk.
+ */
+ if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
+ FALSE, &idx_hint))) {
+ HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
+ "unable to read raw data chunk");
}
-
- /* Transfer data from the chunk buffer to the application */
H5V_hyper_copy(layout->ndims, sub_size, size_m, sub_offset_m,
- (void *)buf, layout->dim, offset_wrt_chunk, chunk);
+ (void*)buf, layout->dim, offset_wrt_chunk, chunk);
+ if (H5F_istore_unlock (f, layout, comp, FALSE, chunk_offset, &idx_hint,
+ chunk, naccessed)<0) {
+ HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
+ "unable to unlock raw data chunk");
+ }
/* Increment indices */
for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) {
@@ -795,12 +1329,7 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
}
if (carry) break;
}
- ret_value = SUCCEED;
-
- done:
- H5MM_xfree(chunk);
- H5MM_xfree (compressed);
- FUNC_LEAVE(ret_value);
+ FUNC_LEAVE(SUCCEED);
}
@@ -833,12 +1362,12 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
hsize_t idx_min[H5O_LAYOUT_NDIMS];
hsize_t idx_max[H5O_LAYOUT_NDIMS];
hsize_t sub_size[H5O_LAYOUT_NDIMS];
+ hssize_t chunk_offset[H5O_LAYOUT_NDIMS];
hssize_t offset_wrt_chunk[H5O_LAYOUT_NDIMS];
hssize_t sub_offset_m[H5O_LAYOUT_NDIMS];
- hsize_t chunk_size, nbytes;
- uint8 *chunk=NULL, *compressed=NULL, *outbuf;
- H5F_istore_ud1_t udata;
- herr_t ret_value = FAIL;
+ uint8 *chunk=NULL;
+ intn idx_hint=0;
+ size_t chunk_size, naccessed;
FUNC_ENTER(H5F_istore_write, FAIL);
@@ -855,9 +1384,10 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
* For now the source must not be a hyperslab. It must be an entire
* memory buffer.
*/
- for (i=0; i<layout->ndims; i++) {
+ for (i=0, chunk_size=1; i<layout->ndims; i++) {
offset_m[i] = 0;
size_m[i] = size[i];
+ chunk_size *= layout->dim[i];
}
#ifndef NDEBUG
@@ -871,10 +1401,9 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
#endif
/*
- * This is the general case. We set up multi-dimensional counters
- * (idx_min, idx_max, and idx_cur) and loop through the chunks copying
- * each chunk into a temporary buffer, compressing or decompressing, and
- * then copying it to it's destination.
+ * Set up multi-dimensional counters (idx_min, idx_max, and idx_cur) and
+ * loop through the chunks copying each chunk from the application to the
+ * chunk cache.
*/
for (i=0; i<layout->ndims; i++) {
idx_min[i] = offset_f[i] / layout->dim[i];
@@ -882,102 +1411,46 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
idx_cur[i] = idx_min[i];
}
- /* Allocate buffers */
- for (i=0, chunk_size=1; i<layout->ndims; i++) {
- chunk_size *= layout->dim[i];
- }
- chunk = H5MM_xmalloc(chunk_size);
- if (comp && H5Z_NONE!=comp->method) {
- compressed = H5MM_xmalloc (chunk_size);
- }
-
- /* Initialize non-changing part of udata */
- udata.mesg = *layout;
/* Loop over all chunks */
while (1) {
-
- for (i=0; i<layout->ndims; i++) {
+
+ for (i=0, naccessed=1; i<layout->ndims; i++) {
/* The location and size of the chunk being accessed */
assert (layout->dim[i] < MAX_HSSIZET);
- udata.key.offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
+ chunk_offset[i] = idx_cur[i] * (hssize_t)(layout->dim[i]);
/* The offset and size wrt the chunk */
- offset_wrt_chunk[i] = MAX(offset_f[i], udata.key.offset[i]) -
- udata.key.offset[i];
+ offset_wrt_chunk[i] = MAX(offset_f[i], chunk_offset[i]) -
+ chunk_offset[i];
sub_size[i] = MIN((idx_cur[i]+1)*layout->dim[i],
offset_f[i]+size[i]) -
- (udata.key.offset[i] + offset_wrt_chunk[i]);
+ (chunk_offset[i] + offset_wrt_chunk[i]);
+ naccessed *= sub_size[i];
/* Offset into mem buffer */
- sub_offset_m[i] = udata.key.offset[i] + offset_wrt_chunk[i] +
+ sub_offset_m[i] = chunk_offset[i] + offset_wrt_chunk[i] +
offset_m[i] - offset_f[i];
}
-
+
/*
- * If we are writing a partial chunk then load the chunk from disk
- * and uncompress it if it exists.
+ * Lock the chunk, copy from application to chunk, then unlock the
+ * chunk.
*/
- if (!H5V_vector_zerop_s(layout->ndims, offset_wrt_chunk) ||
- !H5V_vector_eq_u(layout->ndims, sub_size, layout->dim)) {
- if (H5B_find (f, H5B_ISTORE, &(layout->addr), &udata)>=0 &&
- H5F_addr_defined (&(udata.addr))) {
-
- if (compressed && udata.key.nbytes<chunk_size) {
- if (H5F_block_read(f, &(udata.addr), udata.key.nbytes,
- compressed)<0) {
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
- compressed, chunk_size,
- chunk)) {
- HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL,
- "unable to uncompress data");
- }
- } else {
- assert (chunk_size==udata.key.nbytes);
- if (H5F_block_read(f, &(udata.addr), udata.key.nbytes,
- chunk)<0) {
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,
- "unable to read raw storage chunk");
- }
- }
- } else {
- HDmemset(chunk, 0, chunk_size);
- }
+ if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
+ naccessed==chunk_size, &idx_hint))) {
+ HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+ "unable to read raw data chunk");
}
-
- /* Transfer data to the chunk */
H5V_hyper_copy(layout->ndims, sub_size,
layout->dim, offset_wrt_chunk, chunk,
size_m, sub_offset_m, buf);
-
- /* Compress the chunk */
- if (compressed &&
- (nbytes=H5Z_compress (comp, chunk_size, chunk, compressed)) &&
- nbytes<chunk_size) {
- outbuf = compressed;
- } else {
- outbuf = chunk;
- nbytes = chunk_size;
+ if (H5F_istore_unlock (f, layout, comp, TRUE, chunk_offset, &idx_hint,
+ chunk, naccessed)<0) {
+ HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
+			   "unable to unlock raw data chunk");
}
- /*
- * Create the chunk it if it doesn't exist, or reallocate the chunk
- * if its size changed. Then write the data into the file.
- */
- H5F_addr_undef(&(udata.addr));
- udata.key.nbytes = nbytes;
- if (H5B_insert(f, H5B_ISTORE, &(layout->addr), &udata)<0) {
- HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
- "unable to allocate chunk");
- }
- if (H5F_block_write(f, &(udata.addr), nbytes, outbuf) < 0) {
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL,
- "unable to write raw storage chunk");
- }
-
/* Increment indices */
for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) {
if (++idx_cur[i]>=idx_max[i]) idx_cur[i] = idx_min[i];
@@ -985,12 +1458,8 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
}
if (carry) break;
}
- ret_value = SUCCEED;
- done:
- H5MM_xfree(chunk);
- H5MM_xfree (compressed);
- FUNC_LEAVE(ret_value);
+ FUNC_LEAVE(SUCCEED);
}
@@ -1046,6 +1515,61 @@ H5F_istore_create(H5F_t *f, H5O_layout_t *layout /*out */ )
/*-------------------------------------------------------------------------
+ * Function: H5F_istore_stats
+ *
+ * Purpose: Print raw data cache statistics to the stderr stream. If
+ * HEADERS is non-zero then print table column headers,
+ * otherwise assume that the H5AC layer has already printed them.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_istore_stats (H5F_t *f, hbool_t headers)
+{
+ H5F_rdcc_t *rdcc = &(f->shared->rdcc);
+ double miss_rate;
+ char ascii[32];
+
+ FUNC_ENTER (H5F_istore_stats, FAIL);
+
+ if (headers) {
+ fprintf(stderr, "H5F: raw data cache statistics for file %s\n",
+ f->name);
+ fprintf(stderr, " %-18s %8s %8s %8s %8s+%-8s\n",
+ "Layer", "Hits", "Misses", "MissRate", "Inits", "Flushes");
+ fprintf(stderr, " %-18s %8s %8s %8s %8s-%-8s\n",
+ "-----", "----", "------", "--------", "-----", "-------");
+ }
+
+ if (rdcc->nhits>0 || rdcc->nmisses>0) {
+ miss_rate = 100.0 * rdcc->nmisses /
+ (rdcc->nhits + rdcc->nmisses);
+ } else {
+ miss_rate = 0.0;
+ }
+ if (miss_rate > 100) {
+ sprintf(ascii, "%7d%%", (int) (miss_rate + 0.5));
+ } else {
+ sprintf(ascii, "%7.2f%%", miss_rate);
+ }
+
+ fprintf(stderr, " %-18s %8u %8u %7s %8d+%-9ld\n",
+ "raw data chunks", rdcc->nhits, rdcc->nmisses, ascii,
+ rdcc->ninits, (long)(rdcc->nflushes)-(long)(rdcc->ninits));
+ FUNC_LEAVE (SUCCEED);
+}
+
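As a worked example of the reported miss rate: 900 hits and 100 misses give 100.0 * 100 / (900 + 100) = 10.00%, printed in the MissRate column.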
+
+/*-------------------------------------------------------------------------
* Function: H5F_istore_debug
*
* Purpose: Debugs a B-tree node for indexed raw data storage.
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index cce26e1..26942ce 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -238,6 +238,9 @@ typedef struct H5F_create_t {
* File-access property list.
*/
typedef struct H5F_access_t {
+ intn mdc_nelmts; /* Size of meta data cache (nelmts) */
+ size_t rdcc_nbytes; /* Size of raw data chunk cache (bytes) */
+ double rdcc_w0; /* Preempt read chunks first? [0.0..1.0]*/
H5F_driver_t driver; /* Low level file driver */
union {
@@ -386,6 +389,18 @@ extern const H5F_low_class_t H5F_LOW_SPLIT_g[]; /* Split meta/raw data */
extern const H5F_low_class_t H5F_LOW_MPIO_g[]; /* MPI-IO */
#endif
+/* The raw data chunk cache */
+typedef struct H5F_rdcc_t {
+ uintn ninits; /* Number of chunk creations */
+ uintn nhits; /* Number of cache hits */
+ uintn nmisses;/* Number of cache misses */
+ uintn nflushes;/* Number of cache flushes */
+ size_t nbytes; /* Current cached raw data in bytes */
+ intn nslots; /* Number of chunk slots allocated */
+ intn nused; /* Number of chunk slots in use */
+ struct H5F_rdcc_ent_t *slot; /* Chunk slots, each points to a chunk */
+} H5F_rdcc_t;
+
/*
* Define the structure to store the file information for HDF5 files. One of
* these structures is allocated per file, not per H5Fopen().
@@ -406,6 +421,7 @@ typedef struct H5F_file_t {
struct H5G_t *root_grp; /* Open root group */
intn ncwfs; /* Num entries on cwfs list */
struct H5HG_heap_t **cwfs; /* Global heap cache */
+ H5F_rdcc_t rdcc; /* Raw data chunk cache */
} H5F_file_t;
/*
@@ -494,6 +510,10 @@ herr_t H5F_arr_write (H5F_t *f, const struct H5O_layout_t *layout,
const hssize_t file_offset[], const void *_buf);
/* Functions that operate on indexed storage */
+herr_t H5F_istore_init (H5F_t *f);
+herr_t H5F_istore_flush (H5F_t *f);
+herr_t H5F_istore_dest (H5F_t *f);
+herr_t H5F_istore_stats (H5F_t *f, hbool_t headers);
herr_t H5F_istore_create(H5F_t *f, struct H5O_layout_t *layout /*in,out*/);
herr_t H5F_istore_read(H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
diff --git a/src/H5MM.c b/src/H5MM.c
index 329a4f0..5a02f41 100644
--- a/src/H5MM.c
+++ b/src/H5MM.c
@@ -1,59 +1,61 @@
/*-------------------------------------------------------------------------
- * Copyright (C) 1997 National Center for Supercomputing Applications.
- * All rights reserved.
+ * Copyright (C) 1997 National Center for Supercomputing Applications.
+ * All rights reserved.
*
*-------------------------------------------------------------------------
*
- * Created: H5MM.c
- * Jul 10 1997
- * Robb Matzke <matzke@llnl.gov>
+ * Created: H5MM.c
+ * Jul 10 1997
+ * Robb Matzke <matzke@llnl.gov>
*
- * Purpose: Memory management functions.
+ * Purpose: Memory management functions.
*
- * Modifications:
+ * Modifications:
*
*-------------------------------------------------------------------------
*/
#include <H5private.h>
#include <H5MMprivate.h>
+
/*-------------------------------------------------------------------------
- * Function: H5MM_xmalloc
+ * Function: H5MM_xmalloc
*
- * Purpose: Just like malloc(3) except it aborts on an error.
+ * Purpose: Just like malloc(3) except it aborts on an error.
*
- * Return: Success: Ptr to new memory.
+ * Return: Success: Ptr to new memory.
*
- * Failure: abort()
+ * Failure: abort()
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 10 1997
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 10 1997
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
-void *
+void *
H5MM_xmalloc(size_t size)
{
- void *mem = HDmalloc(size);
+ void *mem = HDmalloc(size);
assert(mem);
return mem;
}
+
/*-------------------------------------------------------------------------
- * Function: H5MM_xcalloc
+ * Function: H5MM_xcalloc
*
- * Purpose: Just like calloc(3) except it aborts on an error.
+ * Purpose: Just like calloc(3) except it aborts on an error.
*
- * Return: Success: Ptr to memory.
+ * Return: Success: Ptr to memory.
*
- * Failure: abort()
+ * Failure: abort()
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 10 1997
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 10 1997
*
* Modifications:
*
@@ -73,64 +75,65 @@ H5MM_xcalloc(intn n, size_t size)
return mem;
}
+
/*-------------------------------------------------------------------------
- * Function: H5MM_xrealloc
+ * Function: H5MM_xrealloc
*
- * Purpose: Just like the POSIX version of realloc(3) exept it aborts
- * on an error. Specifically, the following calls are
- * equivalent
+ * Purpose:	Just like the POSIX version of realloc(3) except it aborts
+ * on an error. Specifically, the following calls are
+ * equivalent
*
- * H5MM_xrealloc (NULL, size) <==> H5MM_xmalloc (size)
- * H5MM_xrealloc (ptr, 0) <==> H5MM_xfree (ptr)
- * H5MM_xrealloc (NULL, 0) <==> NULL
+ * H5MM_xrealloc (NULL, size) <==> H5MM_xmalloc (size)
+ * H5MM_xrealloc (ptr, 0) <==> H5MM_xfree (ptr)
+ * H5MM_xrealloc (NULL, 0) <==> NULL
*
- * Return: Success: Ptr to new memory or NULL if the memory
- * was freed.
+ * Return: Success: Ptr to new memory or NULL if the memory
+ * was freed.
*
- * Failure: abort()
+ * Failure: abort()
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 10 1997
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 10 1997
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
-void *
+void *
H5MM_xrealloc(void *mem, size_t size)
{
if (!mem) {
- if (0 == size)
- return NULL;
- mem = H5MM_xmalloc(size);
+ if (0 == size) return NULL;
+ mem = H5MM_xmalloc(size);
} else if (0 == size) {
- mem = H5MM_xfree(mem);
+ mem = H5MM_xfree(mem);
} else {
- mem = HDrealloc(mem, size);
- assert(mem);
+ mem = HDrealloc(mem, size);
+ assert(mem);
}
return mem;
}
+
/*-------------------------------------------------------------------------
- * Function: H5MM_strdup
+ * Function:	H5MM_xstrdup
*
- * Purpose: Duplicates a string. If the string to be duplicated is the
- * null pointer, then return null. If the string to be duplicated
- * is the empty string then return a new empty string.
+ * Purpose: Duplicates a string. If the string to be duplicated is the
+ * null pointer, then return null. If the string to be duplicated
+ * is the empty string then return a new empty string.
*
- * Return: Success: Ptr to a new string (or null if no string).
+ * Return: Success: Ptr to a new string (or null if no string).
*
- * Failure: abort()
+ * Failure: abort()
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 10 1997
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 10 1997
*
* Modifications:
*
@@ -139,30 +142,31 @@ H5MM_xrealloc(void *mem, size_t size)
char *
H5MM_xstrdup(const char *s)
{
- char *mem;
+ char *mem;
if (!s) return NULL;
mem = H5MM_xmalloc(HDstrlen(s) + 1);
HDstrcpy(mem, s);
return mem;
}
+
/*-------------------------------------------------------------------------
- * Function: H5MM_xfree
+ * Function: H5MM_xfree
*
- * Purpose: Just like free(3) except null pointers are allowed as
- * arguments, and the return value (always NULL) can be
- * assigned to the pointer whose memory was just freed:
+ * Purpose: Just like free(3) except null pointers are allowed as
+ * arguments, and the return value (always NULL) can be
+ * assigned to the pointer whose memory was just freed:
*
- * thing = H5MM_xfree (thing);
+ * thing = H5MM_xfree (thing);
*
- * Return: Success: NULL
+ * Return: Success: NULL
*
- * Failure: never fails
+ * Failure: never fails
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 10 1997
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 10 1997
*
* Modifications:
*
diff --git a/src/H5O.c b/src/H5O.c
index 35dbe79..ce369cf 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -667,6 +667,76 @@ H5O_reset(const H5O_class_t *type, void *native)
/*-------------------------------------------------------------------------
+ * Function: H5O_free
+ *
+ * Purpose: Similar to H5O_reset() except it also frees the message
+ * pointer.
+ *
+ * Return: Success: NULL
+ *
+ * Failure: NULL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void *
+H5O_free (const H5O_class_t *type, void *mesg)
+{
+ FUNC_ENTER (H5O_free, NULL);
+
+ if (mesg) {
+ H5O_reset (type, mesg);
+ H5MM_xfree (mesg);
+ }
+
+ FUNC_LEAVE (NULL);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_copy
+ *
+ * Purpose:	Copies a message. If MESG is the null pointer then a null
+ * pointer is returned with no error.
+ *
+ * Return: Success: Ptr to the new message
+ *
+ * Failure: NULL
+ *
+ * Programmer: Robb Matzke
+ * Thursday, May 21, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void *
+H5O_copy (const H5O_class_t *type, const void *mesg)
+{
+ void *ret_value = NULL;
+
+ FUNC_ENTER (H5O_copy, NULL);
+
+ assert (type);
+ assert (type->copy);
+
+ if (mesg) {
+ if (NULL==(ret_value=(type->copy)(mesg, NULL))) {
+ HRETURN_ERROR (H5E_OHDR, H5E_CANTINIT, NULL,
+ "unable to copy object header message");
+ }
+ }
+
+ FUNC_LEAVE (ret_value);
+}
+
+
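A minimal sketch of the copy/free pairing (illustrative only; it follows the same idiom as H5F_istore_unlock() above, and `layout' is assumed to be a valid H5O_layout_t message):

    H5O_layout_t *writable = H5O_copy (H5O_LAYOUT, layout);
    if (!writable) {
        /* unable to copy the message */
    }
    /* ... hand the copy to code that must not see the `const' original ... */
    writable = H5O_free (H5O_LAYOUT, writable);   /* always returns NULL */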
+
+/*-------------------------------------------------------------------------
* Function: H5O_link
*
* Purpose: Adjust the link count for an object header by adding
@@ -1209,9 +1279,7 @@ H5O_remove(H5G_entry_t *ent, const H5O_class_t *type, intn sequence)
/* change message type to nil and zero it */
oh->mesg[i].type = H5O_NULL;
HDmemset(oh->mesg[i].raw, 0, oh->mesg[i].raw_size);
- H5O_reset(type, oh->mesg[i].native);
- oh->mesg[i].native = H5MM_xfree(oh->mesg[i].native);
-
+ oh->mesg[i].native = H5O_free (type, oh->mesg[i].native);
oh->mesg[i].dirty = TRUE;
oh->dirty = TRUE;
}
@@ -1850,8 +1918,7 @@ H5O_debug(H5F_t *f, const haddr_t *addr, FILE * stream, intn indent,
(oh->mesg[i].type->debug)(f, mesg, stream, indent+3,
MAX (0, fwidth-3));
}
- H5O_reset (oh->mesg[i].type, mesg);
- H5MM_xfree (mesg);
+ H5O_free (oh->mesg[i].type, mesg);
H5MM_xfree (p);
}
}
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index b6e340b..7db9313 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -247,6 +247,8 @@ intn H5O_modify (H5G_entry_t *ent, const H5O_class_t *type, intn overwrite,
uintn flags, const void *mesg);
herr_t H5O_remove (H5G_entry_t *ent, const H5O_class_t *type, intn sequence);
herr_t H5O_reset (const H5O_class_t *type, void *native);
+void *H5O_free (const H5O_class_t *type, void *mesg);
+void *H5O_copy (const H5O_class_t *type, const void *mesg);
herr_t H5O_share (H5F_t *f, const H5O_class_t *type, const void *mesg,
H5HG_t *hobj/*out*/);
herr_t H5O_debug (H5F_t *f, const haddr_t *addr, FILE * stream, intn indent,
diff --git a/src/H5P.c b/src/H5P.c
index 6e8c337..09ed1a6 100644
--- a/src/H5P.c
+++ b/src/H5P.c
@@ -59,7 +59,7 @@ H5P_init_interface(void)
FUNC_ENTER(H5P_init_interface, FAIL);
/*
- * Make sure the file creation and file access default templates are
+ * Make sure the file creation and file access default property lists are
* initialized since this might be done at run-time instead of compile
* time.
*/
@@ -71,9 +71,9 @@ H5P_init_interface(void)
assert(H5P_NCLASSES <= H5_TEMPLATE_MAX - H5_TEMPLATE_0);
/*
- * Initialize the mappings between template classes and atom groups. We
- * keep the two separate because template classes are publicly visible but
- * atom groups aren't.
+ * Initialize the mappings between property list classes and atom
+ * groups. We keep the two separate because property list classes are
+ * publicly visible but atom groups aren't.
*/
for (i = 0; i < H5P_NCLASSES; i++) {
status = H5I_init_group((H5I_group_t)(H5_TEMPLATE_0 +i),
@@ -127,20 +127,23 @@ H5P_term_interface(void)
NAME
H5Pcreate
PURPOSE
- Returns a copy of the default template for some class of templates.
+ Returns a copy of the default property list for some class of property
+    lists.
USAGE
herr_t H5Pcreate (type)
- H5P_class_t type; IN: Template class whose default is desired.
+ H5P_class_t type; IN: Property list class whose default is
+			desired.
RETURNS
- Template ID or FAIL
+ Property list ID or FAIL
ERRORS
- ARGS BADVALUE Unknown template class.
- ATOM CANTINIT Can't register template.
+ ARGS BADVALUE Unknown property list class.
+ ATOM CANTINIT Can't register property list.
INTERNAL UNSUPPORTED Not implemented yet.
DESCRIPTION
- Returns a copy of the default template for some class of templates.
+ Returns a copy of the default property list for some class of property
+    lists.
--------------------------------------------------------------------------*/
hid_t
H5Pcreate(H5P_class_t type)
@@ -150,7 +153,7 @@ H5Pcreate(H5P_class_t type)
FUNC_ENTER(H5Pcreate, FAIL);
- /* Allocate a new template and initialize it with default values */
+ /* Allocate a new property list and initialize it with default values */
switch (type) {
case H5P_FILE_CREATE:
tmpl = H5MM_xmalloc(sizeof(H5F_create_t));
@@ -174,13 +177,13 @@ H5Pcreate(H5P_class_t type)
default:
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "unknown template class");
+ "unknown property list class");
}
- /* Atomize the new template */
+ /* Atomize the new property list */
if ((ret_value = H5P_create(type, tmpl)) < 0) {
HRETURN_ERROR(H5E_ATOM, H5E_CANTINIT, FAIL,
- "can't register template");
+ "unable to register property list");
}
FUNC_LEAVE(ret_value);
}
@@ -188,11 +191,12 @@ H5Pcreate(H5P_class_t type)
/*-------------------------------------------------------------------------
* Function: H5P_create
*
- * Purpose: Given a pointer to some template struct, atomize the template
- * and return its ID. The template memory is not copied, so the
- * caller should not free it; it will be freed by H5P_release().
+ * Purpose: Given a pointer to some property list struct, atomize the
+ * property list and return its ID. The property list memory is
+ * not copied, so the caller should not free it; it will be
+ * freed by H5P_release().
*
- * Return: Success: A new template ID.
+ * Return: Success: A new property list ID.
*
* Failure: FAIL
*
@@ -214,10 +218,10 @@ H5P_create(H5P_class_t type, void *tmpl)
assert(type >= 0 && type < H5P_NCLASSES);
assert(tmpl);
- /* Atomize the new template */
+ /* Atomize the new property list */
if ((ret_value=H5I_register((H5I_group_t)(H5_TEMPLATE_0+type), tmpl)) < 0) {
HRETURN_ERROR(H5E_ATOM, H5E_CANTINIT, FAIL,
- "can't register template");
+ "unable to register property list");
}
FUNC_LEAVE(ret_value);
@@ -227,14 +231,14 @@ H5P_create(H5P_class_t type, void *tmpl)
NAME
H5Pclose
PURPOSE
- Release access to a template object.
+ Release access to a property list object.
USAGE
herr_t H5Pclose(oid)
- hid_t oid; IN: Template object to release access to
+ hid_t oid; IN: property list object to release access to
RETURNS
SUCCEED/FAIL
DESCRIPTION
- This function releases access to a template object
+ This function releases access to a property list object
--------------------------------------------------------------------------*/
herr_t
H5Pclose(hid_t tid)
@@ -265,8 +269,8 @@ H5Pclose(hid_t tid)
/*-------------------------------------------------------------------------
* Function: H5P_close
*
- * Purpose: Closes a template and frees the memory associated with the
- * template.
+ * Purpose: Closes a property list and frees the memory associated with
+ * the property list.
*
* Return: Success: SUCCEED
*
@@ -290,7 +294,7 @@ H5P_close (H5P_class_t type, void *tmpl)
/* Check args */
if (!tmpl) HRETURN (SUCCEED);
- /* Some templates may need to do special things */
+ /* Some property lists may need to do special things */
switch (type) {
case H5P_FILE_ACCESS:
switch (fa_list->driver) {
@@ -340,7 +344,7 @@ H5P_close (H5P_class_t type, void *tmpl)
"unknown property list class");
}
- /* Free the template struct and return */
+ /* Free the property list struct and return */
H5MM_xfree(tmpl);
FUNC_LEAVE(SUCCEED);
}
@@ -349,9 +353,9 @@ H5P_close (H5P_class_t type, void *tmpl)
/*-------------------------------------------------------------------------
* Function: H5Pget_class
*
- * Purpose: Returns the class identifier for a template.
+ * Purpose: Returns the class identifier for a property list.
*
- * Return: Success: A template class
+ * Return: Success: A property list class
*
* Failure: H5P_NO_CLASS (-1)
*
@@ -375,7 +379,8 @@ H5Pget_class(hid_t tid)
group >= H5_TEMPLATE_MAX ||
#endif
group < H5_TEMPLATE_0) {
- HRETURN_ERROR(H5E_ATOM, H5E_BADATOM, H5P_NO_CLASS, "not a template");
+ HRETURN_ERROR(H5E_ATOM, H5E_BADATOM, H5P_NO_CLASS,
+ "not a property list");
}
ret_value = (H5P_class_t)(group - H5_TEMPLATE_0);
FUNC_LEAVE(ret_value);
@@ -419,7 +424,7 @@ H5Pget_version(hid_t tid, int *boot/*out*/, int *freelist/*out*/,
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
/* Get values */
if (boot) *boot = tmpl->bootblock_ver;
@@ -433,7 +438,8 @@ H5Pget_version(hid_t tid, int *boot/*out*/, int *freelist/*out*/,
/*-------------------------------------------------------------------------
* Function: H5Pset_userblock
*
- * Purpose: Sets the userblock size field of a file creation template.
+ * Purpose: Sets the userblock size field of a file creation property
+ * list.
*
* Return: Success: SUCCEED
*
@@ -458,7 +464,7 @@ H5Pset_userblock(hid_t tid, hsize_t size)
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
for (i=8; i<8*sizeof(hsize_t); i++) {
hsize_t p2 = 8==i ? 0 : ((hsize_t)1<<i);
@@ -477,7 +483,8 @@ H5Pset_userblock(hid_t tid, hsize_t size)
/*-------------------------------------------------------------------------
* Function: H5Pget_userblock
*
- * Purpose: Queries the size of a user block in a file creation template.
+ * Purpose: Queries the size of a user block in a file creation property
+ * list.
*
* Return: Success: SUCCEED, size returned through SIZE argument.
*
@@ -501,7 +508,7 @@ H5Pget_userblock(hid_t tid, hsize_t *size)
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
/* Get value */
if (size) *size = tmpl->userblock_size;
@@ -512,9 +519,9 @@ H5Pget_userblock(hid_t tid, hsize_t *size)
/*-------------------------------------------------------------------------
* Function: H5Pset_sizes
*
- * Purpose: Sets file size-of addresses and sizes. TEMPLATE
- * should be a file creation template. A value of zero causes
- * the property to not change.
+ * Purpose: Sets file size-of addresses and sizes. TID should be a file
+ * creation property list. A value of zero causes the property
+ * to not change.
*
* Return: Success: SUCCEED
*
@@ -538,7 +545,7 @@ H5Pset_sizes(hid_t tid, size_t sizeof_addr, size_t sizeof_size)
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
if (sizeof_addr) {
if (sizeof_addr != 2 && sizeof_addr != 4 &&
@@ -567,8 +574,8 @@ H5Pset_sizes(hid_t tid, size_t sizeof_addr, size_t sizeof_size)
* Function: H5Pget_sizes
*
* Purpose: Returns the size of address and size quantities stored in a
- * file according to a file creation template. Either (or even
- * both) SIZEOF_ADDR and SIZEOF_SIZE may be null pointers.
+ * file according to a file creation property list. Either (or
+ * even both) SIZEOF_ADDR and SIZEOF_SIZE may be null pointers.
*
* Return: Success: SUCCEED, sizes returned through arguments.
*
@@ -593,7 +600,7 @@ H5Pget_sizes(hid_t tid,
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
/* Get values */
if (sizeof_addr)
@@ -644,7 +651,7 @@ H5Pset_sym_k(hid_t tid, int ik, int lk)
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
/* Set values */
if (ik > 0) {
@@ -686,7 +693,7 @@ H5Pget_sym_k(hid_t tid, int *ik /*out */ , int *lk /*out */ )
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
/* Get values */
if (ik)
@@ -726,7 +733,7 @@ H5Pset_istore_k(hid_t tid, int ik)
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
if (ik <= 0) {
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
@@ -767,7 +774,7 @@ H5Pget_istore_k(hid_t tid, int *ik /*out */ )
if (H5P_FILE_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file creation template");
+ "not a file creation property list");
}
/* Get value */
if (ik)
@@ -803,7 +810,7 @@ H5Pset_layout(hid_t tid, H5D_layout_t layout)
if (H5P_DATASET_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a dataset creation template");
+ "not a dataset creation property list");
}
if (layout < 0 || layout >= H5D_NLAYOUTS) {
HRETURN_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL,
@@ -818,7 +825,7 @@ H5Pset_layout(hid_t tid, H5D_layout_t layout)
/*-------------------------------------------------------------------------
* Function: H5Pget_layout
*
- * Purpose: Retrieves layout type of a dataset creation template.
+ * Purpose: Retrieves layout type of a dataset creation property list.
*
* Return: Success: The layout type
*
@@ -842,7 +849,7 @@ H5Pget_layout(hid_t tid)
if (H5P_DATASET_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, H5D_LAYOUT_ERROR,
- "not a dataset creation template");
+ "not a dataset creation property list");
}
FUNC_LEAVE(tmpl->layout);
}
@@ -880,7 +887,7 @@ H5Pset_chunk(hid_t tid, int ndims, const hsize_t dim[])
if (H5P_DATASET_CREATE != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a dataset creation template");
+ "not a dataset creation property list");
}
if (ndims <= 0) {
HRETURN_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL,
@@ -1200,7 +1207,7 @@ H5Pset_stdio (hid_t tid)
if (H5P_FILE_ACCESS != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
/* Set driver */
@@ -1280,7 +1287,7 @@ H5Pset_sec2 (hid_t tid)
if (H5P_FILE_ACCESS != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
/* Set driver */
@@ -1364,7 +1371,7 @@ H5Pset_core (hid_t tid, size_t increment)
if (H5P_FILE_ACCESS != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
if (increment<1) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
@@ -1458,19 +1465,19 @@ H5Pset_split (hid_t tid, const char *meta_ext, hid_t meta_tid,
if (H5P_FILE_ACCESS != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
if (H5P_DEFAULT!=meta_tid &&
(H5P_FILE_ACCESS != H5Pget_class(meta_tid) ||
NULL == (meta_tmpl = H5I_object(meta_tid)))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
if (H5P_DEFAULT!=raw_tid &&
(H5P_FILE_ACCESS != H5Pget_class(raw_tid) ||
NULL == (raw_tmpl = H5I_object(raw_tid)))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
/* Set driver */
@@ -1603,7 +1610,7 @@ H5Pset_family (hid_t tid, hsize_t memb_size, hid_t memb_tid)
if (H5P_FILE_ACCESS != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
if (memb_size && memb_size<1024) {
HRETURN_ERROR (H5E_ARGS, H5E_BADRANGE, FAIL,
@@ -1613,7 +1620,7 @@ H5Pset_family (hid_t tid, hsize_t memb_size, hid_t memb_tid)
(H5P_FILE_ACCESS != H5Pget_class(memb_tid) ||
NULL == (tmpl = H5I_object(memb_tid)))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
/* Set driver */
@@ -1682,6 +1689,106 @@ H5Pget_family (hid_t tid, hsize_t *memb_size/*out*/, hid_t *memb_tid/*out*/)
/*-------------------------------------------------------------------------
+ * Function: H5Pset_cache
+ *
+ * Purpose: Set the number of objects in the meta data cache and the
+ * total number of bytes in the raw data chunk cache.
+ *
+ * The RDCC_W0 value should be between 0 and 1 inclusive and
+ *		indicates how strongly chunks that have been fully read are
+ * favored for preemption. A value of zero means fully read
+ * chunks are treated no differently than other chunks (the
+ * preemption is strictly LRU) while a value of one means fully
+ * read chunks are always preempted before other chunks.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Tuesday, May 19, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_cache (hid_t tid, int mdc_nelmts, size_t rdcc_nbytes,
+ double rdcc_w0)
+{
+ H5F_access_t *fapl = NULL;
+
+ FUNC_ENTER (H5Pset_cache, FAIL);
+
+ /* Check arguments */
+ if (H5P_FILE_ACCESS!=H5Pget_class (tid) ||
+ NULL==(fapl=H5I_object (tid))) {
+ HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
+ "not a file access property list");
+ }
+ if (mdc_nelmts<0) {
+ HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
+ "meta data cache size must be non-negative");
+ }
+ if (rdcc_w0<0.0 || rdcc_w0>1.0) {
+ HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
+ "raw data cache w0 value must be between 0.0 and 1.0 "
+ "inclusive");
+ }
+
+ /* Set sizes */
+ fapl->mdc_nelmts = mdc_nelmts;
+    fapl->rdcc_nbytes = rdcc_nbytes;
+    fapl->rdcc_w0 = rdcc_w0;
+
+ FUNC_LEAVE (SUCCEED);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_cache
+ *
+ * Purpose: Retrieves the maximum possible number of elements in the meta
+ * data cache and the maximum possible number of bytes in the
+ * raw data chunk cache. Any (or all) arguments may be null
+ * pointers in which case the corresponding datum is not
+ * returned.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: Robb Matzke
+ * Tuesday, May 19, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_cache (hid_t tid, int *mdc_nelmts, size_t *rdcc_nbytes,
+ double *rdcc_w0)
+{
+ H5F_access_t *fapl = NULL;
+
+ FUNC_ENTER (H5Pget_cache, FAIL);
+
+ /* Check arguments */
+ if (H5P_FILE_ACCESS!=H5Pget_class (tid) ||
+ NULL==(fapl=H5I_object (tid))) {
+ HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
+ "not a file access property list");
+ }
+
+ /* Get sizes */
+ if (mdc_nelmts) *mdc_nelmts = fapl->mdc_nelmts;
+ if (rdcc_nbytes) *rdcc_nbytes = fapl->rdcc_nbytes;
+ if (rdcc_w0) *rdcc_w0 = fapl->rdcc_w0;
+
+ FUNC_LEAVE (SUCCEED);
+}
+
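A minimal usage sketch for the new cache properties (illustrative values only; error checking omitted):

    hid_t   fapl = H5Pcreate (H5P_FILE_ACCESS);
    int     mdc_nelmts;
    size_t  rdcc_nbytes;
    double  rdcc_w0;

    H5Pset_cache (fapl, 200, 1024*1024, 0.75); /*200 meta data objects, 1MB chunk cache*/
    H5Pget_cache (fapl, &mdc_nelmts, &rdcc_nbytes, &rdcc_w0);
    H5Pclose (fapl);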
+
+/*-------------------------------------------------------------------------
* Function: H5Pset_buffer
*
* Purpose: Given a dataset transfer property list, set the maximum size
@@ -1889,7 +1996,7 @@ H5Pset_compression (hid_t plist_id, H5Z_method_t method, unsigned int flags,
HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
"not a dataset creation property list");
}
- if (method<0 || method>H5Z_MAXVAL) {
+ if (method<0 || method>H5Z_USERDEF_MAX) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"invalid compression method");
}
@@ -2054,27 +2161,28 @@ H5Pget_deflate (hid_t plist_id)
* Signature: herr_t H5Pset_mpi(hid_t tid, MPI_Comm comm, MPI_Info info)
*
* Purpose: Store the access mode for MPIO call and the user supplied
- * communicator and info in the access template which can then
- * be used to open file. This function is available only in the
- * parallel HDF5 library and is not a collective function.
+ * communicator and info in the access property list which can
+ *		then be used to open a file. This function is available only
+ * in the parallel HDF5 library and is not a collective
+ * function.
*
* Parameters:
* hid_t tid
- * ID of template to modify
+ * ID of property list to modify
* MPI_Comm comm
* MPI communicator to be used for file open as defined in
* MPI_FILE_OPEN of MPI-2. This function does not make a
* duplicated communicator. Any modification to comm after
* this function call returns may have undetermined effect
- * to the access template. Users should call this function
- * again to setup the template.
+ * to the access property list. Users should call this
+ * function again to setup the property list.
* MPI_Info info
* MPI info object to be used for file open as defined in
* MPI_FILE_OPEN of MPI-2. This function does not make a
* duplicated info. Any modification to info after
* this function call returns may have undetermined effect
- * to the access template. Users should call this function
- * again to setup the template.
+ * to the access property list. Users should call this
+ * function again to setup the property list.
*
* Return: Success: SUCCEED
*
@@ -2086,11 +2194,11 @@ H5Pget_deflate (hid_t plist_id)
* Modifications:
*
* Robb Matzke, 18 Feb 1998
- * Check all arguments before the template is updated so we don't leave
- * the template in a bad state if something goes wrong. Also, the
- * template data type changed to allow more generality so all the
- * mpi-related stuff is in the `u.mpi' member. The `access_mode' will
- * contain only mpi-related flags defined in H5Fpublic.h.
+ * Check all arguments before the property list is updated so we don't
+ * leave the property list in a bad state if something goes wrong. Also,
+ * the property list data type changed to allow more generality so all
+ * the mpi-related stuff is in the `u.mpi' member. The `access_mode'
+ * will contain only mpi-related flags defined in H5Fpublic.h.
*
* Albert Cheng, Apr 16, 1998
* Removed the access_mode argument. The access_mode is changed
@@ -2112,7 +2220,7 @@ H5Pset_mpi (hid_t tid, MPI_Comm comm, MPI_Info info)
if (H5P_FILE_ACCESS != H5Pget_class(tid) ||
NULL == (tmpl = H5I_object(tid))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
- "not a file access template");
+ "not a file access property list");
}
#ifdef LATER
@@ -2286,24 +2394,24 @@ H5Pget_xfer (hid_t tid, H5D_transfer_t *data_xfer_mode)
NAME
H5Pcopy
PURPOSE
- Copy a template
+ Copy a property list
USAGE
hid_t H5P_copy(tid)
- hid_t tid; IN: Template object to copy
+ hid_t tid; IN: property list object to copy
RETURNS
- Returns template ID (atom) on success, FAIL on failure
+ Returns property list ID (atom) on success, FAIL on failure
ERRORS
- ARGS BADRANGE Unknown template class.
- ATOM BADATOM Can't unatomize template.
- ATOM CANTREGISTER Register the atom for the new template.
+ ARGS BADRANGE Unknown property list class.
+ ATOM BADATOM Can't unatomize property list.
+ ATOM CANTREGISTER Register the atom for the new property list.
INTERNAL UNSUPPORTED Dataset transfer properties are not implemented
yet.
INTERNAL UNSUPPORTED File access properties are not implemented yet.
DESCRIPTION
- This function creates a new copy of a template with all the same parameter
- settings.
+    This function creates a new copy of a property list with all the same
+    parameter settings.
--------------------------------------------------------------------------*/
hid_t
H5Pcopy(hid_t tid)
@@ -2321,19 +2429,19 @@ H5Pcopy(hid_t tid)
(type = H5Pget_class(tid)) < 0 ||
(group = H5I_group(tid)) < 0) {
HRETURN_ERROR(H5E_ATOM, H5E_BADATOM, FAIL,
- "can't unatomize template");
+ "unable to unatomize property list");
}
/* Copy it */
if (NULL==(new_tmpl=H5P_copy (type, tmpl))) {
HRETURN_ERROR (H5E_INTERNAL, H5E_CANTINIT, FAIL,
- "unable to copy template");
+ "unable to copy property list");
}
- /* Register the atom for the new template */
+ /* Register the atom for the new property list */
if ((ret_value = H5I_register(group, new_tmpl)) < 0) {
HRETURN_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL,
- "unable to atomize template pointer");
+ "unable to atomize property list pointer");
}
FUNC_LEAVE(ret_value);
}
@@ -2341,10 +2449,10 @@ H5Pcopy(hid_t tid)
/*-------------------------------------------------------------------------
* Function: H5P_copy
*
- * Purpose: Creates a new template and initializes it with some other
- * template.
+ * Purpose: Creates a new property list and initializes it with some
+ * other property list.
*
- * Return: Success: Ptr to new template
+ * Return: Success: Ptr to new property list
*
* Failure: NULL
*
@@ -2368,7 +2476,7 @@ H5P_copy (H5P_class_t type, const void *src)
FUNC_ENTER (H5P_copy, NULL);
- /* How big is the template */
+ /* How big is the property list */
switch (type) {
case H5P_FILE_CREATE:
size = sizeof(H5F_create_t);
@@ -2388,10 +2496,10 @@ H5P_copy (H5P_class_t type, const void *src)
default:
HRETURN_ERROR(H5E_ARGS, H5E_BADRANGE, NULL,
- "unknown template class");
+ "unknown property list class");
}
- /* Create the new template */
+ /* Create the new property list */
dst = H5MM_xmalloc(size);
HDmemcpy(dst, src, size);
@@ -2446,7 +2554,7 @@ H5P_copy (H5P_class_t type, const void *src)
default:
HRETURN_ERROR(H5E_ARGS, H5E_BADRANGE, NULL,
- "unknown template class");
+ "unknown property list class");
}
FUNC_LEAVE (dst);
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index e9717d1..9a3cda2 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -96,6 +96,10 @@ H5Z_method_t H5Pget_compression (hid_t plist_id, unsigned int *flags/*out*/,
void *client_data/*out*/);
herr_t H5Pset_deflate (hid_t plist_id, int level);
int H5Pget_deflate (hid_t plist_id);
+herr_t H5Pset_cache (hid_t plist_id, int mdc_nelmts, size_t rdcc_nbytes,
+ double rdcc_w0);
+herr_t H5Pget_cache (hid_t plist_id, int *mdc_nelmts, size_t *rdcc_nbytes,
+ double *rdcc_w0);
#ifdef HAVE_PARALLEL
herr_t H5Pset_mpi (hid_t tid, MPI_Comm comm, MPI_Info info);
diff --git a/src/H5Z.c b/src/H5Z.c
index 8a28f54..227d7d8 100644
--- a/src/H5Z.c
+++ b/src/H5Z.c
@@ -50,7 +50,7 @@ typedef struct H5Z_class_t {
} uncomp;
#endif
} H5Z_class_t;
-static H5Z_class_t H5Z_g[H5Z_MAXVAL+1];
+static H5Z_class_t H5Z_g[H5Z_USERDEF_MAX+1];
/* Compression and uncompression methods */
static size_t H5Z_zlib_c (unsigned int flags, size_t __unused__ cd_size,
@@ -114,21 +114,21 @@ H5Z_term_interface (void)
int i, nprint=0;
char name[16];
- for (i=0; i<=H5Z_MAXVAL; i++) {
+ for (i=0; i<=H5Z_USERDEF_MAX; i++) {
if (H5Z_g[i].comp.nbytes || H5Z_g[i].uncomp.nbytes) {
if (0==nprint++) {
HDfprintf (stderr, "H5Z: compression statistics accumulated "
"over life of library:\n");
- HDfprintf (stderr, " %-10s %8s %8s %8s %8s %8s %8s %9s\n",
+ HDfprintf (stderr, " %-10s %10s %7s %7s %8s %8s %8s %9s\n",
"Method", "Total", "Overrun", "Errors", "User",
"System", "Elapsed", "Bandwidth");
- HDfprintf (stderr, " %-10s %8s %8s %8s %8s %8s %8s %9s\n",
+ HDfprintf (stderr, " %-10s %10s %7s %7s %8s %8s %8s %9s\n",
"------", "-----", "-------", "------", "----",
"------", "-------", "---------");
}
sprintf (name, "%s-c", H5Z_g[i].name);
HDfprintf (stderr,
- " %-12s %8Hd %8Hd %8Hd %8.2f %8.2f %8.2f ",
+ " %-12s %10Hd %7Hd %7Hd %8.2f %8.2f %8.2f ",
name,
H5Z_g[i].comp.nbytes,
H5Z_g[i].comp.over,
@@ -145,7 +145,7 @@ H5Z_term_interface (void)
sprintf (name, "%s-u", H5Z_g[i].name);
HDfprintf (stderr,
- " %-12s %8Hd %8Hd %8Hd %8.2f %8.2f %8.2f ",
+ " %-12s %10Hd %7Hd %7Hd %8.2f %8.2f %8.2f ",
name,
H5Z_g[i].uncomp.nbytes,
H5Z_g[i].uncomp.over,
@@ -198,7 +198,7 @@ H5Zregister (H5Z_method_t method, const char *name, H5Z_func_t cfunc,
FUNC_ENTER (H5Zregister, FAIL);
/* Check args */
- if (method<0 || method>H5Z_MAXVAL) {
+ if (method<0 || method>H5Z_USERDEF_MAX) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"invalid data compression method number");
}
@@ -381,7 +381,7 @@ H5Z_register (H5Z_method_t method, const char *name, H5Z_func_t cfunc,
{
FUNC_ENTER (H5Z_register, FAIL);
- assert (method>=0 && method<=H5Z_MAXVAL);
+ assert (method>=0 && method<=H5Z_USERDEF_MAX);
H5MM_xfree (H5Z_g[method].name);
H5Z_g[method].name = H5MM_xstrdup (name);
H5Z_g[method].compress = cfunc;
diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h
index daebf89..2ce1326 100644
--- a/src/H5Zpublic.h
+++ b/src/H5Zpublic.h
@@ -13,24 +13,25 @@
* 15 are defined by the library. Methods 16-255 are user-defined.
*/
typedef int H5Z_method_t;
-#define H5Z_NONE 0 /*no compression, must be zero */
-#define H5Z_DEFLATE 1 /*deflation like gzip */
-#define H5Z_RES_2 2 /*reserved for internal use */
-#define H5Z_RES_3 3 /*reserved for internal use */
-#define H5Z_RES_4 4 /*reserved for internal use */
-#define H5Z_RES_5 5 /*reserved for internal use */
-#define H5Z_RES_6 6 /*reserved for internal use */
-#define H5Z_RES_7 7 /*reserved for internal use */
-#define H5Z_RES_8 8 /*reserved for internal use */
-#define H5Z_RES_9 9 /*reserved for internal use */
-#define H5Z_RES_10 10 /*reserved for internal use */
-#define H5Z_RES_11 11 /*reserved for internal use */
-#define H5Z_RES_12 12 /*reserved for internal use */
-#define H5Z_RES_13 13 /*reserved for internal use */
-#define H5Z_RES_14 14 /*reserved for internal use */
-#define H5Z_RES_15 15 /*reserved for internal use */
+#define H5Z_NONE 0 /*no compression, must be zero */
+#define H5Z_DEFLATE 1 /*deflation like gzip */
+#define H5Z_RES_2 2 /*reserved for internal use */
+#define H5Z_RES_3 3 /*reserved for internal use */
+#define H5Z_RES_4 4 /*reserved for internal use */
+#define H5Z_RES_5 5 /*reserved for internal use */
+#define H5Z_RES_6 6 /*reserved for internal use */
+#define H5Z_RES_7 7 /*reserved for internal use */
+#define H5Z_RES_8 8 /*reserved for internal use */
+#define H5Z_RES_9 9 /*reserved for internal use */
+#define H5Z_RES_10 10 /*reserved for internal use */
+#define H5Z_RES_11 11 /*reserved for internal use */
+#define H5Z_RES_12 12 /*reserved for internal use */
+#define H5Z_RES_13 13 /*reserved for internal use */
+#define H5Z_RES_14 14 /*reserved for internal use */
+#define H5Z_RES_15 15 /*reserved for internal use */
/* user-defined 16-255 */
-#define H5Z_MAXVAL 255 /*maximum compression method ID */
+#define H5Z_USERDEF_MIN 16 /*first user-defined method */
+#define H5Z_USERDEF_MAX 255 /*last user-defined method */
/*
* A compression function takes some configuration data which comes from the
diff --git a/src/Makefile.in b/src/Makefile.in
index 0f7db53..1cffe5a 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -18,10 +18,10 @@ PARALLEL_SRC=H5Fmpio.c
LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5D.c H5E.c H5F.c H5Farray.c H5Fcore.c \
H5Ffamily.c H5Fistore.c H5Flow.c H5Fsec2.c H5Fsplit.c H5Fstdio.c H5G.c \
- H5Gent.c H5Gnode.c H5Gstab.c H5HG.c H5HL.c H5I.c H5MF.c H5MM.c \
- H5O.c H5Oattr.c H5Ocomp.c H5Ocont.c H5Odtype.c H5Oefl.c H5Olayout.c \
- H5Oname.c H5Onull.c H5Osdspace.c H5Oshared.c H5Ostab.c H5P.c H5S.c \
- H5Ssimp.c H5T.c H5Tconv.c H5Tinit.c H5V.c H5Z.c @PARALLEL_SRC@
+ H5Gent.c H5Gnode.c H5Gstab.c H5HG.c H5HL.c H5I.c H5MF.c H5MM.c H5O.c \
+ H5Oattr.c H5Ocomp.c H5Ocont.c H5Odtype.c H5Oefl.c H5Olayout.c H5Oname.c \
+ H5Onull.c H5Osdspace.c H5Oshared.c H5Ostab.c H5P.c H5S.c H5Ssimp.c H5T.c \
+ H5Tconv.c H5Tinit.c H5V.c H5Z.c @PARALLEL_SRC@
LIB_OBJ=$(LIB_SRC:.c=.o)