-rw-r--r--   MANIFEST               4
-rw-r--r--   src/H5.c               2
-rw-r--r--   src/H5AC.c          1842
-rw-r--r--   src/H5ACprivate.h     97
-rw-r--r--   src/H5B.c             52
-rw-r--r--   src/H5C.c           3490
-rw-r--r--   src/H5Cprivate.h     374
-rw-r--r--   src/H5Cpublic.h       40
-rw-r--r--   src/H5E.c             29
-rw-r--r--   src/H5Edefin.h         9
-rw-r--r--   src/H5Einit.h         37
-rw-r--r--   src/H5Eprivate.h       1
-rw-r--r--   src/H5Epubgen.h       18
-rw-r--r--   src/H5Eterm.h         13
-rw-r--r--   src/H5F.c              8
-rw-r--r--   src/H5Gnode.c         39
-rw-r--r--   src/H5HG.c           187
-rw-r--r--   src/H5HL.c            38
-rw-r--r--   src/H5O.c             56
-rw-r--r--   src/H5Pdcpl.c          3
-rw-r--r--   src/H5err.txt          6
-rw-r--r--   src/Makefile.in       32
-rw-r--r--   test/Makefile.in      16
-rw-r--r--   test/cache.c        4067
24 files changed, 9177 insertions, 1283 deletions
diff --git a/MANIFEST b/MANIFEST
index 6b57ce5..45f5029 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -774,6 +774,9 @@
./src/H5Bpkg.h
./src/H5Bprivate.h
./src/H5Bpublic.h
+./src/H5C.c
+./src/H5Cprivate.h
+./src/H5Cpublic.h
./src/H5D.c
./src/H5Dcontig.c
./src/H5Dcompact.c
@@ -959,6 +962,7 @@
./test/Makefile.in
./test/big.c
./test/bittests.c
+./test/cache.c
./test/cmpd_dset.c
./test/createnoenc.c
./test/dangle.c
diff --git a/src/H5.c b/src/H5.c
index dec226e..b81e36e 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -137,6 +137,8 @@ H5_init_library(void)
* & dataset interfaces though, in order to provide them with the proper
* property classes.
*/
+ if (H5E_init()<0)
+ HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize error interface")
if (H5P_init()<0)
HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize property list interface")
if (H5F_init()<0)
diff --git a/src/H5AC.c b/src/H5AC.c
index 5742a80..ee5f082 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -31,6 +31,14 @@
* Quincey Koziol, 22 Apr 2000
* Turned on "H5AC_SORT_BY_ADDR"
*
+ * John Mainzer, 5/19/04
+ * Complete redesign and rewrite. See the header comments for
+ * H5AC_t for an overview of what is going on.
+ *
+ * John Mainzer, 6/4/04
+ * Factored the new cache code into a separate file (H5C.c) to
+ * facilitate re-use. Re-worked this file again to use H5C.
+ *
*-------------------------------------------------------------------------
*/
@@ -49,9 +57,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files */
#include "H5FDprivate.h" /* File drivers */
-#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
-#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */
#ifdef H5_HAVE_FPHDF5
@@ -60,56 +66,12 @@
#endif /* H5_HAVE_FPHDF5 */
/*
- * Private macros
- */
-
-/* Hash an address in the file to an offset in the cache */
-#define H5AC_HASH_DIVISOR 8 /* Attempt to spread out the hashing */
- /* This should be the same size as the alignment of */
- /* of the smallest file format object written to the file. */
-#define H5AC_HASH(F,ADDR) H5F_addr_hash((ADDR/H5AC_HASH_DIVISOR),(F)->shared->cache->nslots)
-
-/*
- * Private typedefs & structs
- */
-
-#ifdef H5AC_DEBUG
-typedef struct H5AC_prot_t {
- int nprots; /*number of things protected */
- int aprots; /*nelmts of `prot' array */
- H5AC_info_t **slot; /*array of pointers to protected things */
-} H5AC_prot_t;
-#endif /* H5AC_DEBUG */
-
-struct H5AC_t {
- unsigned nslots; /*number of cache slots */
- H5AC_info_t **slot; /*the cache slots, an array of pointers to the cached objects */
- H5AC_info_t **dslot; /*"held object" cache slots, an array of pointers to dirty cached objects */
-#ifdef H5AC_DEBUG
- H5AC_prot_t *prot; /*the protected slots */
-#endif /* H5AC_DEBUG */
- int nprots; /*number of protected objects */
-#ifdef H5AC_DEBUG
- struct {
- unsigned nhits; /*number of cache hits */
- unsigned nmisses; /*number of cache misses */
- unsigned ninits; /*number of cache inits */
- unsigned nflushes; /*number of flushes to disk */
-#ifdef H5_HAVE_PARALLEL
- unsigned ndestroys; /*number of cache destroys */
- unsigned nholds; /*number of cache holds */
- unsigned nrestores; /*number of cache restores */
-#endif /* H5_HAVE_PARALLEL */
- } diagnostics[H5AC_NTYPES]; /*diagnostics for each type of object*/
-#endif /* H5AC_DEBUG */
-};
-
-/*
* Private file-scope variables.
*/
/* Default dataset transfer property list for metadata I/O calls */
-/* (Collective set, "block before metadata write" set and "library internal" set) */
+/* (Collective set, "block before metadata write" set and */
+/* "library internal" set) */
/* (Global variable definition, declaration is in H5ACprivate.h also) */
hid_t H5AC_dxpl_id=(-1);
@@ -123,33 +85,26 @@ static hid_t H5AC_noblock_dxpl_id=(-1);
/* (Global variable definition, declaration is in H5ACprivate.h also) */
hid_t H5AC_ind_dxpl_id=(-1);
-static H5AC_t *current_cache_g = NULL; /*for sorting */
-
-/* Declare a free list to manage the H5AC_t struct */
-H5FL_DEFINE_STATIC(H5AC_t);
-/* Declare a free list to manage the cache mapping sequence information */
-H5FL_SEQ_DEFINE_STATIC(unsigned);
-
-/* Declare a free list to manage the cache slot sequence information */
-H5FL_SEQ_DEFINE_STATIC(H5AC_info_ptr_t);
+/*
+ * Private file-scope function declarations:
+ */
-#ifdef H5AC_DEBUG
-/* Declare a free list to manage the protected slot sequence information */
-H5FL_SEQ_DEFINE_STATIC(H5AC_prot_t);
-#endif /* H5AC_DEBUG */
+static herr_t H5AC_check_if_write_permitted(H5F_t *f,
+ hid_t dxpl_id,
+ hbool_t * write_permitted_ptr);
/*-------------------------------------------------------------------------
- * Function: H5AC_init
+ * Function: H5AC_init
*
- * Purpose: Initialize the interface from some other layer.
+ * Purpose: Initialize the interface from some other layer.
*
- * Return: Success: non-negative
+ * Return: Success: non-negative
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
* Saturday, January 18, 2003
*
* Modifications:
@@ -197,7 +152,7 @@ H5AC_init_interface(void)
FUNC_ENTER_NOAPI_NOINIT(H5AC_init_interface)
/* Sanity check */
- assert(H5P_CLS_DATASET_XFER_g!=(-1));
+ HDassert(H5P_CLS_DATASET_XFER_g!=(-1));
/* Get the dataset transfer property list class object */
if (NULL == (xfer_pclass = H5I_object(H5P_CLS_DATASET_XFER_g)))
@@ -366,55 +321,83 @@ H5AC_term_interface(void)
*
* Modifications:
*
+ * Complete re-design and re-write to support the re-designed
+ * metadata cache.
+ *
+ * At present, the size_hint is ignored, and the
+ * max_cache_size and min_clean_size fields are hard
+ * coded. This should be fixed, but a parameter
+ * list change will be required, so I will leave it
+ * for now.
+ *
+ * Since no-one seems to care, the function now returns
+ * one on success.
+ * JRM - 4/28/04
+ *
+ * Reworked the function again after abstracting its guts to
+ * the similar function in H5C.c. The function is now a
+ * wrapper for H5C_create().
+ * JRM - 6/4/04
*-------------------------------------------------------------------------
*/
+
+const char * H5AC_entry_type_names[H5AC_NTYPES] =
+{
+ "B-tree nodes",
+ "symbol table nodes",
+ "local heaps",
+ "global heaps",
+ "object headers"
+};
+
int
-H5AC_create(const H5F_t *f, int size_hint)
+H5AC_create(const H5F_t *f,
+ int UNUSED size_hint)
{
- H5AC_t *cache = NULL;
- int ret_value; /* Return value */
+ H5AC_t * cache_ptr = NULL;
+ int ret_value=1; /* Return value */
FUNC_ENTER_NOAPI(H5AC_create, FAIL)
- assert(f);
- assert(NULL == f->shared->cache);
-
- /* If size hint is negative, use the default size */
- if (size_hint < 1)
- size_hint = H5AC_NSLOTS;
-
- if (NULL==(f->shared->cache = cache = H5FL_CALLOC(H5AC_t)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
- cache->nslots = (unsigned)size_hint;
- if (NULL==( cache->slot = H5FL_SEQ_CALLOC(H5AC_info_ptr_t,cache->nslots)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
- if (NULL==( cache->dslot = H5FL_SEQ_CALLOC(H5AC_info_ptr_t,cache->nslots)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
-#ifdef H5AC_DEBUG
- if ((cache->prot = H5FL_SEQ_CALLOC(H5AC_prot_t,cache->nslots))==NULL)
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
-#endif /* H5AC_DEBUG */
+ HDassert(f);
+ HDassert(NULL == f->shared->cache);
- /* Set return value */
- ret_value=size_hint;
+ /* this is test code that should be removed when we start passing
+ * in proper size hints.
+ * -- JRM
+ */
+ cache_ptr = H5C_create(H5C__DEFAULT_MAX_CACHE_SIZE,
+ H5C__DEFAULT_MIN_CLEAN_SIZE,
+ (H5AC_NTYPES - 1),
+ &H5AC_entry_type_names,
+ H5AC_check_if_write_permitted);
+
+ if ( NULL == cache_ptr ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ } else {
+
+ f->shared->cache = (struct H5AC_t *)cache_ptr;
+
+ }
done:
- if(ret_value<0) {
- if(cache!=NULL) {
- if(cache->dslot !=NULL)
- cache->dslot = H5FL_SEQ_FREE (H5AC_info_ptr_t,cache->dslot);
- if(cache->slot !=NULL)
- cache->slot = H5FL_SEQ_FREE (H5AC_info_ptr_t,cache->slot);
-#ifdef H5AC_DEBUG
- if(cache->prot !=NULL)
- cache->prot = H5FL_SEQ_FREE (H5AC_prot_t,cache->prot);
-#endif /* H5AC_DEBUG */
- f->shared->cache = H5FL_FREE (H5AC_t,f->shared->cache);
+
+ if ( ret_value < 0 ) {
+
+ if ( cache_ptr != NULL ) {
+
+ H5C_dest_empty(cache_ptr);
+ f->shared->cache = NULL;
+
} /* end if */
+
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-}
+
+} /* H5AC_create() */
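
With the rewrite, H5AC_create() reduces to a thin wrapper around H5C_create(), and H5AC_dest() (further down) to a wrapper around H5C_dest(). For orientation, here is a minimal caller-side sketch of the pairing; it is not part of this patch, the open H5F_t and metadata dxpl_id are assumed from context, and error handling is abbreviated:

    /* Sketch only: assumes an open H5F_t *f with f->shared->cache == NULL
     * and a valid metadata dxpl_id.
     */
    if ( H5AC_create(f, 0) < 0 )       /* size hint is ignored at present -- */
        return FAIL;                   /* sizes come from the H5C__DEFAULT_  */
                                       /* constants passed to H5C_create()   */

    /* ... metadata I/O via H5AC_set() / H5AC_protect() / H5AC_unprotect() ... */

    if ( H5AC_dest(f, dxpl_id) < 0 )   /* tear down the cache and free its   */
        return FAIL;                   /* entries                            */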
/*-------------------------------------------------------------------------
@@ -432,100 +415,77 @@ done:
*
* Modifications:
*
+ * Complete re-design and re-write to support the re-designed
+ * metadata cache.
+ * JRM - 5/12/04
+ *
+ * Abstracted the guts of the function to H5C_dest() in H5C.c,
+ * and then re-wrote the function as a wrapper for H5C_dest().
+ *
+ * JRM - 6/7/04
+ *
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_dest(H5F_t *f, hid_t dxpl_id)
+H5AC_dest(H5F_t *f,
+ hid_t dxpl_id)
{
- H5AC_t *cache = NULL;
+ H5AC_t *cache_ptr = NULL;
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5AC_dest, FAIL)
assert(f);
assert(f->shared->cache);
- cache = f->shared->cache;
+ cache_ptr = (H5AC_t *)(f->shared->cache);
- if (H5AC_flush(f, dxpl_id, NULL, HADDR_UNDEF, H5F_FLUSH_INVALIDATE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ f->shared->cache = NULL;
-#ifdef H5AC_DEBUG
- {
- unsigned i;
- for (i=0; i<cache->nslots; i++) {
- cache->prot[i].slot = H5MM_xfree(cache->prot[i].slot);
- cache->prot[i].aprots = 0;
- cache->prot[i].nprots = 0;
- }
- cache->prot = H5FL_SEQ_FREE(H5AC_prot_t,cache->prot);
- }
-#endif
+ if ( H5C_dest(f, dxpl_id, H5AC_noblock_dxpl_id, cache_ptr) < 0 ) {
- cache->dslot = H5FL_SEQ_FREE(H5AC_info_ptr_t,cache->dslot);
- cache->slot = H5FL_SEQ_FREE(H5AC_info_ptr_t,cache->slot);
- cache->nslots = 0;
- f->shared->cache = cache = H5FL_FREE(H5AC_t,cache);
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't destroy cache")
+ }
done:
- FUNC_LEAVE_NOAPI(ret_value)
-}
-
-
-/*-------------------------------------------------------------------------
- * Function: H5AC_compare
- *
- * Purpose: Compare two hash entries by address. Unused entries are
- * all equal to one another and greater than all used entries.
- *
- * Return: Success: -1, 0, 1
- *
- * Failure: never fails
- *
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Aug 12 1997
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-static int
-H5AC_compare(const void *_a, const void *_b)
-{
- unsigned a = *((const unsigned *) _a);
- unsigned b = *((const unsigned *) _b);
- H5AC_info_t *slot_a;
- H5AC_info_t *slot_b;
- int ret_value=0;
-
- /* Use FUNC_ENTER_NOAPI_NOINIT here to avoid performance issues */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5AC_compare)
-
- assert(current_cache_g);
-
- /* Create aliases for slots */
- slot_a=current_cache_g->slot[a];
- slot_b=current_cache_g->slot[b];
-
- assert(slot_a);
- assert(slot_b);
- assert(slot_a->type);
- assert(slot_b->type);
-
- if (slot_a->addr < slot_b->addr) {
- ret_value=(-1);
- } else if (slot_a->addr > slot_b->addr) {
- ret_value=1;
- }
FUNC_LEAVE_NOAPI(ret_value)
-}
+
+} /* H5AC_dest() */
/*-------------------------------------------------------------------------
* Function: H5AC_flush
*
- * Purpose: Flushes (and destroys if DESTROY is non-zero) the specified
+ * Purpose: Flush (and possibly destroy) the metadata cache associated
+ * with the specified file.
+ *
+ * This is a re-write of an earlier version of the function
+ * which was reputedly capable of flushing (and destroying
+ * if requested) individual entries, individual entries if
+ * they match the supplied type, all entries of a given type,
+ * as well as all entries in the cache.
+ *
+ * As only this last capability is actually used at present,
+ * I have not implemented the other capabilities in this
+ * version of the function.
+ *
+ * The type and addr parameters are retained to avoid source
+ * code changes, but values other than NULL and HADDR_UNDEF
+ * respectively are errors. If all goes well, they should
+ * be removed, and the function renamed to something more
+ * descriptive -- perhaps H5AC_flush_cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be flushed. However,
+ * all unprotected entries should be flushed before the
+ * function returns failure.
+ *
+ * For historical purposes, the original version of the
+ * purpose section is reproduced below:
+ *
+ * ============ Original Version of "Purpose:" ============
+ *
+ * Flushes (and destroys if DESTROY is non-zero) the specified
* entry from the cache. If the entry TYPE is CACHE_FREE and
* ADDR is HADDR_UNDEF then all types of entries are
* flushed. If TYPE is CACHE_FREE and ADDR is defined then
@@ -546,258 +506,52 @@ H5AC_compare(const void *_a, const void *_b)
* Modifications:
* Robb Matzke, 1999-07-27
* The ADDR argument is passed by value.
+ *
+ * Complete re-write. See above for details. -- JRM 5/11/04
+ *
+ * Abstracted the guts of the function to H5C_flush_cache() in
+ * H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_flush_cache().
+ *
+ * JRM - 6/7/04
+ *
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_flush(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, unsigned flags)
+H5AC_flush(H5F_t *f,
+ hid_t dxpl_id,
+ const H5AC_class_t *type,
+ haddr_t addr,
+ unsigned flags)
{
- herr_t status;
- H5AC_flush_func_t flush=NULL; /* 'flush' callback for an object */
- H5AC_info_t **info;
- unsigned *map = NULL; /* Mapping of cache entries */
- hbool_t destroy=(flags&H5F_FLUSH_INVALIDATE)>0; /* Flag for destroying objects */
- hbool_t clear_only=(flags&H5F_FLUSH_CLEAR_ONLY)>0; /* Flag for only clearing objects */
- unsigned nslots;
- H5AC_t *cache;
- unsigned u; /* Local index variable */
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t status;
+ herr_t ret_value = SUCCEED;
+ H5AC_t * cache_ptr;
FUNC_ENTER_NOAPI(H5AC_flush, FAIL)
- assert(f);
- assert(f->shared->cache);
+ HDassert(f);
+ HDassert(f->shared->cache);
+ HDassert(type == NULL);
+ HDassert(!H5F_addr_defined(addr));
- /* Get local copy of this information */
- cache = f->shared->cache;
+ cache_ptr = (H5AC_t *)(f->shared->cache);
- if (!H5F_addr_defined(addr)) {
- unsigned first_flush=1; /* Indicate if this is the first flush */
+ status = H5C_flush_cache(f,
+ dxpl_id,
+ H5AC_noblock_dxpl_id,
+ cache_ptr,
+ flags);
- /*
- * Sort the cache entries by address since flushing them in
- * ascending order by address is much more efficient.
- */
- if (NULL==(map=H5FL_SEQ_MALLOC(unsigned,cache->nslots)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
-#ifdef H5_HAVE_PARALLEL
- /* If MPI based VFD is used, do special parallel I/O actions */
- if(IS_H5FD_MPI(f)) {
- H5AC_info_t **dinfo;
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id;
-#endif /* H5AC_DEBUG */
-#ifndef NDEBUG
- H5P_genplist_t *dxpl; /* Dataset transfer property list */
- H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
-
- /* Get the dataset transfer property list */
- if (NULL == (dxpl = H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
-
- /* Get the transfer mode property */
- if(H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve xfer mode")
-
- /* Sanity check transfer mode */
- assert(xfer_mode == H5FD_MPIO_COLLECTIVE || IS_H5FD_FPHDF5(f));
-#endif /* NDEBUG */
-
- /* Create the mapping */
- for (u = nslots = 0; u < cache->nslots; u++) {
- info = cache->slot + u;
- dinfo = cache->dslot + u;
-
- /* Move dirty metadata from 'held' slots into 'regular' slots */
- if((*dinfo)!=NULL) {
- H5AC_dest_func_t dest;
-
- /* Various sanity checks */
- assert((*dinfo)->dirty);
- assert((*info)!=NULL);
- assert((*info)->dirty==0);
-
-#ifdef H5AC_DEBUG
- type_id=(*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /* Destroy 'current' information */
- dest = (*info)->type->dest;
- if ((dest)(f, (*info))<0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free cached object")
-
- /* Restore 'held' information back to 'current' information */
- (*info)=(*dinfo);
-
- /* Clear 'held' information */
- (*dinfo)=NULL;
-
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nrestores++;
-#endif /* H5AC_DEBUG */
- } /* end if */
- if ((*info))
- map[nslots++] = u;
- } /* end for */
- } /* end if */
- else {
-#endif /* H5_HAVE_PARALLEL */
- for (u = nslots = 0; u < cache->nslots; u++) { /*lint !e539 Positive indention is OK */
- if (cache->slot[u]!=NULL)
- map[nslots++] = u;
- }
-#ifdef H5_HAVE_PARALLEL
- } /* end else */
-#endif /* H5_HAVE_PARALLEL */
- assert(NULL == current_cache_g);
- current_cache_g = cache;
- HDqsort(map, nslots, sizeof(unsigned), H5AC_compare);
- current_cache_g = NULL;
-#ifndef NDEBUG
- for (u = 1; u < nslots; u++)
- assert(H5F_addr_lt(cache->slot[map[u - 1]]->addr, cache->slot[map[u]]->addr));
-#endif
-
- /*
- * Look at all cache entries.
- */
- for (u = 0; u < nslots; u++) {
- info = cache->slot + map[u];
- assert(*info);
- if (!type || type == (*info)->type) {
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id=(*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /* Clear the dirty flag only, if requested */
- if(clear_only) {
- /* Call the callback routine to clear all dirty flags for object */
- if(((*info)->type->clear)(f, *info, destroy)<0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to clear cache");
- } /* end if */
- else {
- flush = (*info)->type->flush;
-
- /* Only block for all the processes on the first piece of metadata */
- if(first_flush && (*info)->dirty) {
- status = (flush)(f, dxpl_id, destroy, (*info)->addr, (*info));
- first_flush=0;
- } /* end if */
- else
- status = (flush)(f, H5AC_noblock_dxpl_id, destroy, (*info)->addr, (*info));
- if (status < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nflushes++;
-#endif /* H5AC_DEBUG */
- } /* end else */
-
- /* Destroy entry also, if asked */
- if (destroy)
- (*info)= NULL;
- }
- }
+ if ( status < 0 ) {
- /*
- * If there are protected object then fail. However, everything
- * else should have been flushed.
- */
- if (cache->nprots > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "cache has protected items")
- } else {
- u = H5AC_HASH(f, addr);
- info = cache->slot + u;
-#ifdef H5_HAVE_PARALLEL
- /* If MPI based VFD is used, do special parallel I/O actions */
- if(IS_H5FD_MPI(f)) {
- H5AC_info_t **dinfo;
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id;
-#endif /* H5AC_DEBUG */
-#ifndef NDEBUG
- H5P_genplist_t *dxpl; /* Dataset transfer property list */
- H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
-
- /* Get the dataset transfer property list */
- if (NULL == (dxpl = H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
-
- /* Get the transfer mode property */
- if(H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve xfer mode")
-
- /* Sanity check transfer mode */
- assert(xfer_mode==H5FD_MPIO_COLLECTIVE);
-#endif /* NDEBUG */
-
- dinfo = cache->dslot + u;
-
- /* Restore dirty metadata from 'held' slot to 'current' slot */
- if((*dinfo)!=NULL) {
- H5AC_dest_func_t dest;
-
- /* Various sanity checks */
- assert((*dinfo)->dirty);
- assert((*info)!=NULL);
- assert((*info)->dirty==0);
-
-#ifdef H5AC_DEBUG
- type_id=(*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /* Destroy 'current' information */
- dest = (*info)->type->dest;
- if ((dest)(f, (*info))<0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free cached object")
-
- /* Restore 'held' information back to 'current' information */
- (*info)=(*dinfo);
-
- /* Clear 'held' information */
- (*dinfo)=NULL;
-
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nrestores++;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
- if ((*info) && (!type || (*info)->type == type) &&
- H5F_addr_eq((*info)->addr, addr)) {
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id=(*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /*
- * Flush just this entry.
- */
-
- /* Clear the dirty flag only, if requested */
- if(clear_only) {
- /* Call the callback routine to clear all dirty flags for object */
- if(((*info)->type->clear)(f, *info, destroy)<0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to clear cache");
- } /* end if */
- else {
- flush = (*info)->type->flush;
- if((flush)(f, dxpl_id, destroy, (*info)->addr, (*info)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush object")
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nflushes++;
-#endif /* H5AC_DEBUG */
- } /* end else */
-
- /* Destroy entry also, if asked */
- if (destroy)
- (*info)= NULL;
- } /* end if */
- } /* end else */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
+ }
done:
- if(map!=NULL)
- map = H5FL_SEQ_FREE(unsigned,map);
FUNC_LEAVE_NOAPI(ret_value)
-}
+
+} /* H5AC_flush() */
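
As the revised Purpose section explains, only the flush-the-whole-cache capability survives, so the type and addr arguments must be NULL and HADDR_UNDEF. A caller-side sketch of the two remaining uses (not part of this patch; f and dxpl_id are assumed from context):

    /* Sketch only: the sole call pattern H5AC_flush() now supports.  A
     * non-NULL type or a defined addr will trip the new HDasserts above.
     */
    if ( H5AC_flush(f, dxpl_id, NULL, HADDR_UNDEF, 0) < 0 )    /* flush only */
        return FAIL;

    /* flush and evict everything, e.g. in preparation for closing the file */
    if ( H5AC_flush(f, dxpl_id, NULL, HADDR_UNDEF, H5F_FLUSH_INVALIDATE) < 0 )
        return FAIL;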
/*-------------------------------------------------------------------------
@@ -824,164 +578,138 @@ done:
* Added automatic "flush" if the FPHDF5 driver is being
* used. This'll write the metadata to the SAP where other,
* lesser processes can grab it.
+ *
+ * JRM - 5/13/04
+ * Complete re-write for the new metadata cache. The new
+ * code is functionally almost identical to the old, although
+ * the sanity check for a protected entry is now an assert
+ * at the beginning of the function.
+ *
+ * JRM - 6/7/04
+ * Abstracted the guts of the function to H5C_insert_entry()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_insert_entry().
+ *
*-------------------------------------------------------------------------
*/
+
herr_t
-H5AC_set(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, void *thing)
+H5AC_set(H5F_t *f,
+ hid_t dxpl_id,
+ const H5AC_class_t *type,
+ haddr_t addr,
+ void *thing)
{
- unsigned idx;
- H5AC_info_t **info;
- H5AC_t *cache;
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t result;
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5AC_info_t * info_ptr;
+ H5AC_t * cache_ptr;
FUNC_ENTER_NOAPI(H5AC_set, FAIL)
- assert(f);
- assert(f->shared->cache);
- assert(type);
- assert(type->flush);
- assert(H5F_addr_defined(addr));
- assert(thing);
+ HDassert(f);
+ HDassert(f->shared->cache);
+ HDassert(type);
+ HDassert(type->flush);
+ HDassert(type->size);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(thing);
- /* Get local copy of this information */
- idx = H5AC_HASH(f, addr);
- cache = f->shared->cache;
- info = cache->slot + idx;
-
-#ifdef H5AC_DEBUG
- {
- H5AC_prot_t *prot = NULL;
- int i;
+ cache_ptr = (H5AC_t *)(f->shared->cache);
+ info_ptr = (H5AC_info_t *)thing;
- prot = cache->prot + idx;
- for (i = 0; i < prot->nprots; i++)
- assert(H5F_addr_ne(addr, prot->slot[i]->addr));
- }
-#endif
+ info_ptr->addr = addr;
+ info_ptr->type = type;
+ info_ptr->protected = FALSE;
#ifdef H5_HAVE_PARALLEL
- /*
- * If MPI based VFD is used, do special parallel I/O actions
+#ifdef H5_HAVE_FPHDF5
+ /* In the flexible parallel case, the cache is always empty. Thus
+ * we simply flush and destroy the entry we have just received.
*/
- if(IS_H5FD_MPI(f)) {
- H5AC_info_t **dinfo;
- H5P_genplist_t *dxpl; /* Dataset transfer property list */
- H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id;
-#endif /* H5AC_DEBUG */
+ {
+ H5FD_t * lf;
+ unsigned req_id;
+ H5FP_status_t status;
- /* Get the dataset transfer property list */
- if (NULL == (dxpl = H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
+ HDassert(f->shared->lf);
- /* Get the transfer mode property */
- if(H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve xfer mode")
-
- /* Get pointer to 'held' information */
- dinfo = cache->dslot + idx;
-
- /* Sanity check transfer mode */
- if(xfer_mode==H5FD_MPIO_COLLECTIVE) {
- /* Check for dirty metadata */
- if(*dinfo) {
- H5AC_dest_func_t dest;
-
- /* Various sanity checks */
- assert((*dinfo)->dirty);
- assert((*info)!=NULL);
- assert((*info)->dirty==0);
-
-#ifdef H5AC_DEBUG
- type_id=(*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /* Destroy 'current' information */
- dest = (*info)->type->dest;
- if ((dest)(f, (*info))<0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free cached object")
-
- /* Restore 'held' information back to 'current' information */
- (*info)=(*dinfo);
-
- /* Clear 'held' information */
- (*dinfo)=NULL;
-
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nrestores++;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end if */
- else {
- /* Sanity check */
- assert((*dinfo)==NULL);
- assert(xfer_mode==H5FD_MPIO_INDEPENDENT);
-
- /* Make certain there will be no write of dirty metadata */
- if((*info) && (*info)->dirty) {
- /* Sanity check new item */
- assert(((H5AC_info_t*)thing)->dirty==0);
-
- /* 'Hold' the current metadata for later */
- (*dinfo)=(*info);
-
- /* Reset the 'current' metadata, so it doesn't get flushed */
- (*info)=NULL;
-
-#ifdef H5AC_DEBUG
- cache->diagnostics[(*dinfo)->type->id].nholds++;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end else */
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ lf = f->shared->lf;
- /* Flush any object already in cache slot */
- if ((*info)) {
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id=(*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
+ if ( H5FD_is_fphdf5_driver(lf) ) {
- if ((*info)->type->flush(f, dxpl_id, TRUE, (*info)->addr, (*info)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush object")
+ /*
+ * This is the FPHDF5 driver. Grab a lock for this piece of
+ * metadata from the SAP. Bail-out quickly if we're unable to do
+ * that. In the case of the FPHDF5 driver, the local cache is
+ * turned off. We lock the address then write the data to the SAP.
+ * We do this because the cache is off and thus cannot retain the
+ * data which has just been added to it. We will get it from the
+ * SAP as needed in the future.
+ */
+ result = H5FP_request_lock(H5FD_fphdf5_file_id(lf), addr,
+ H5FP_LOCK_WRITE, TRUE, &req_id, &status);
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nflushes++;
-#endif /* H5AC_DEBUG */
- } /* end if */
+ if ( result < 0 ) {
+#if 0
+ HDfprintf(stdout, "H5AC_set: Lock failed.\n");
+ /*
+ * FIXME: Check the status variable. If the lock is got
+ * by some other process, we can loop and wait or bail
+ * out of this function
+ */
+ HDfprintf(stderr,
+ "Couldn't get lock for metadata at address %a\n",
+ addr);
+#endif /* 0 */
+ HGOTO_ERROR(H5E_FPHDF5, H5E_CANTLOCK, FAIL, \
+ "can't lock data on SAP!")
+ }
- /* Cache this item */
- (*info) = thing;
- (*info)->type = type;
- (*info)->addr = addr;
+ /* write the metadata to the SAP. */
-#ifdef H5_HAVE_FPHDF5
- if (H5FD_is_fphdf5_driver(f->shared->lf)) {
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id = (*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /*
- * We want to write this metadata to the SAP right now. This will
- * keep all of the participating processes in sync.
- */
- if ((*info)->type->flush(f, dxpl_id, FALSE, (*info)->addr, *info) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush object")
-
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[type_id].nflushes;
-#endif /* H5AC_DEBUG */
+ result = (info_ptr->type->flush)(f, dxpl_id, TRUE,
+ info_ptr->addr, info_ptr);
+
+ if ( result < 0 ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush entry")
+ }
+
+ /* and then release the lock */
+
+ result = H5FP_request_release_lock(H5FD_fphdf5_file_id(lf), addr,
+ TRUE, &req_id, &status);
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_FPHDF5, H5E_CANTUNLOCK, FAIL, \
+ "can't unlock data on SAP!")
+ }
+
+ HGOTO_DONE(SUCCEED);
+ }
}
#endif /* H5_HAVE_FPHDF5 */
+#endif /* H5_HAVE_PARALLEL */
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[type->id].ninits;
-#endif /* H5AC_DEBUG */
+ result = H5C_insert_entry(f,
+ dxpl_id,
+ H5AC_noblock_dxpl_id,
+ cache_ptr,
+ type,
+ addr,
+ thing);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_insert_entry() failed")
+ }
done:
+
FUNC_LEAVE_NOAPI(ret_value)
-}
+
+} /* H5AC_set() */
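
H5AC_set() now hands a newly created entry straight to H5C_insert_entry(). A typical client-side call looks like the following sketch (not part of this patch; 'node' and its allocation are hypothetical, and H5AC_BT stands in for any cache client class):

    /* Sketch only.  'node' is a freshly created metadata object of class
     * H5AC_BT located at file address 'addr'.  After a successful insert the
     * cache owns the entry, so further access must go through
     * H5AC_protect() / H5AC_unprotect() rather than the raw pointer.
     */
    if ( H5AC_set(f, dxpl_id, H5AC_BT, addr, node) < 0 )
        return FAIL;

    node = NULL;                       /* the cache now owns the entry */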
/*-------------------------------------------------------------------------
@@ -1003,206 +731,101 @@ done:
* Modifications:
* Robb Matzke, 1999-07-27
* The OLD_ADDR and NEW_ADDR arguments are passed by value.
+ *
+ * JRM 5/17/04
+ * Complete rewrite for the new meta-data cache.
+ *
+ * JRM - 6/7/04
+ * Abstracted the guts of the function to H5C_rename_entry()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_rename_entry().
+ *
*-------------------------------------------------------------------------
*/
+
herr_t
-H5AC_rename(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t old_addr,
+H5AC_rename(H5F_t *f,
+ hid_t UNUSED dxpl_id,
+ const H5AC_class_t *type,
+ haddr_t old_addr,
haddr_t new_addr)
{
- unsigned old_idx, new_idx;
- H5AC_flush_func_t flush;
- H5AC_t *cache;
- H5AC_info_t **new_info = NULL;
- H5AC_info_t **old_info = NULL;
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t result;
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5AC_t * cache_ptr;
FUNC_ENTER_NOAPI(H5AC_rename, FAIL)
- assert(f);
- assert(f->shared->cache);
- assert(type);
+ HDassert(f);
+ HDassert(f->shared->cache);
+ HDassert(type);
+ HDassert(H5F_addr_defined(old_addr));
+ HDassert(H5F_addr_defined(new_addr));
+ HDassert(H5F_addr_ne(old_addr, new_addr));
- /* Get local copy of this information */
- old_idx = H5AC_HASH(f, old_addr);
- new_idx = H5AC_HASH(f, new_addr);
- cache = f->shared->cache;
- new_info = cache->slot + new_idx;
- old_info = cache->slot + old_idx;
-
-#ifdef H5AC_DEBUG
- {
- H5AC_prot_t *prot = NULL;
- int i;
-
- prot = cache->prot + old_idx;
- for (i = 0; i < prot->nprots; i++)
- assert(H5F_addr_ne(old_addr, prot->slot[i]->addr));
- prot = cache->prot + new_idx;
- for (i = 0; i < prot->nprots; i++)
- assert(H5F_addr_ne(new_addr, prot->slot[i]->addr));
- }
-#endif
+ cache_ptr = (H5AC_t *)(f->shared->cache);
- /*
- * We don't need to do anything if the object isn't cached or if the
- * new hash value is the same as the old one.
+#ifdef H5_HAVE_PARALLEL
+#ifdef H5_HAVE_FPHDF5
+ /* In the flexible parallel case, the cache is always empty.
+ * Thus H5AC_rename() has nothing to do by definition.
*/
- assert(old_info);
- if (H5F_addr_ne((*old_info)->addr, old_addr) || (*old_info)->type!=type)
- HGOTO_DONE(SUCCEED)
- if (old_idx == new_idx) {
- (*old_info)->addr = new_addr;
- HGOTO_DONE(SUCCEED)
- }
+ {
+ H5FD_t * lf;
-#ifdef H5_HAVE_PARALLEL
- /* If MPI based VFD is used, do special parallel I/O actions */
- if(IS_H5FD_MPI(f)) {
- H5AC_info_t **new_dinfo;
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id;
-#endif /* H5AC_DEBUG */
- H5P_genplist_t *dxpl; /* Dataset transfer property list */
- H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
+ HDassert(f->shared->lf);
- /* Get the dataset transfer property list */
- if (NULL == (dxpl = H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
+ lf = f->shared->lf;
- /* Get the transfer mode property */
- if(H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve xfer mode")
-
- /* Get pointer to new 'held' information */
- new_dinfo = cache->dslot + new_idx;
-
- /* Sanity check transfer mode */
- if(xfer_mode==H5FD_MPIO_COLLECTIVE) {
- /* Check for dirty metadata */
- if(*new_dinfo) {
- H5AC_dest_func_t dest;
-
- /* Various sanity checks */
- assert((*new_dinfo)->dirty);
- assert((*new_info)!=NULL);
- assert((*new_info)->dirty==0);
-
-#ifdef H5AC_DEBUG
- type_id=(*new_info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /* Destroy 'current' information */
- dest = (*new_info)->type->dest;
- if ((dest)(f, (*new_info))<0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free cached object")
-
- /* Restore 'held' information back to 'current' information */
- (*new_info)=(*new_dinfo);
-
- /* Clear 'held' information */
- (*new_dinfo)=NULL;
-
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nrestores++;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end if */
- else {
- /* Sanity check that there will be no write of dirty metadata */
- assert((*new_dinfo)==NULL);
- assert(xfer_mode==H5FD_MPIO_INDEPENDENT);
-
- /* Make certain there will be no write of dirty metadata */
- if((*new_info) && (*new_info)->dirty) {
- /* Sanity check that we won't put two pieces of dirty metadata in same cache location */
- assert((*old_info)->dirty==0);
-
- /* 'Hold' the current metadata for later */
- (*new_dinfo)=(*new_info);
-
- /* Reset the 'current' metadata, so it doesn't get flushed */
- (*new_info)=NULL;
-
-#ifdef H5AC_DEBUG
- cache->diagnostics[(*new_dinfo)->type->id].nholds++;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end else */
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ if ( H5FD_is_fphdf5_driver(lf) ) {
- /*
- * Free the item from the destination cache line.
- */
- if (*new_info) {
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id=(*new_info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- flush = (*new_info)->type->flush;
- if ( (flush)(f, dxpl_id, TRUE, (*new_info)->addr, (*new_info)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush object")
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nflushes++;
-#endif /* H5AC_DEBUG */
+ HGOTO_DONE(SUCCEED);
+ }
}
+#endif /* H5_HAVE_FPHDF5 */
+#endif /* H5_HAVE_PARALLEL */
- /*
- * Move the source to the destination (it might not be cached)
- */
- (*new_info)= (*old_info);
- (*new_info)->addr = new_addr;
+ result = H5C_rename_entry(f,
+ cache_ptr,
+ type,
+ old_addr,
+ new_addr);
-#ifdef H5_HAVE_PARALLEL
- /* If MPI based VFD is used, do special parallel I/O actions */
- if(IS_H5FD_MPI(f)) {
- H5AC_info_t **old_dinfo;
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id;
-#endif /* H5AC_DEBUG */
-
- /* Get pointer to new 'held' information */
- old_dinfo = cache->dslot + old_idx;
-
- /* Check for 'held' metadata in old location & restore it, if so */
- if(*old_dinfo) {
- /* Sanity check */
- assert((*old_dinfo)->dirty);
-
-#ifdef H5AC_DEBUG
- type_id=(*old_info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /* Restore 'held' information back to 'current' information */
- (*old_info)=(*old_dinfo);
-
- /* Clear 'held' information */
- (*old_dinfo)=NULL;
-
-#ifdef H5AC_DEBUG
- cache->diagnostics[type_id].nrestores++;
-#endif /* H5AC_DEBUG */
- } /* end if */
- else
- (*old_info)= NULL;
- } /* end if */
- else {
-#endif /* H5_HAVE_PARALLEL */
+ if ( result < 0 ) {
- (*old_info)= NULL;
-#ifdef H5_HAVE_PARALLEL
- } /* end else */
-#endif /* H5_HAVE_PARALLEL */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ "H5C_rename_entry() failed.")
+ }
done:
+
FUNC_LEAVE_NOAPI(ret_value)
-}
+
+} /* H5AC_rename() */
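
A matching usage sketch for H5AC_rename(), again outside the patch and with illustrative names: when a cached metadata object has been relocated in the file, the cache's notion of its address must be updated before the object is next accessed at the new location.

    /* Sketch only.  'old_addr' and 'new_addr' are hypothetical.  The call
     * re-keys the entry in the cache; the data itself is neither written
     * nor moved by H5AC_rename().
     */
    if ( H5AC_rename(f, dxpl_id, H5AC_BT, old_addr, new_addr) < 0 )
        return FAIL;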
/*-------------------------------------------------------------------------
* Function: H5AC_protect
*
- * Purpose: Similar to H5AC_find() except the object is removed from
+ * Purpose: If the target entry is not in the cache, load it. If
+ * necessary, attempt to evict one or more entries to keep
+ * the cache within its maximum size.
+ *
+ * Mark the target entry as protected, and return its address
+ * to the caller. The caller must call H5AC_unprotect() when
+ * finished with the entry.
+ *
+ * While it is protected, the entry may not be either evicted
+ * or flushed -- nor may it be accessed by another call to
+ * H5AC_protect. Any attempt to do so will result in a failure.
+ *
+ * This comment is a re-write of the original Purpose: section.
+ * For historical interest, the original version is reproduced
+ * below:
+ *
+ * Original Purpose section:
+ *
+ * Similar to H5AC_find() except the object is removed from
* the cache and given to the caller, preventing other parts
* of the program from modifying the protected object or
* preempting it from the cache.
@@ -1228,261 +851,190 @@ done:
* Bill Wendling, 2003-09-10
* Added parameter to indicate whether this is a READ or
* WRITE type of protect.
+ *
+ * JRM -- 5/17/04
+ * Complete re-write for the new client cache. See revised
+ * Purpose section above.
+ *
+ * JRM - 6/7/04
+ * Abstracted the guts of the function to H5C_protect()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_protect().
+ *
*-------------------------------------------------------------------------
*/
+
void *
-H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
- const void *udata1, void *udata2, H5AC_protect_t
+H5AC_protect(H5F_t *f,
+ hid_t dxpl_id,
+ const H5AC_class_t *type,
+ haddr_t addr,
+ const void *udata1,
+ void *udata2,
+ H5AC_protect_t
#ifndef H5_HAVE_FPHDF5
UNUSED
#endif /* H5_HAVE_FPHDF5 */
rw)
{
- unsigned idx; /* Index in cache */
- void *thing = NULL;
- H5AC_t *cache = NULL;
- H5AC_info_t **info;
- void *ret_value; /* Return value */
-#ifdef H5_HAVE_FPHDF5
- H5FD_t *lf;
- unsigned req_id;
- H5FP_status_t status;
-#endif /* H5_HAVE_FPHDF5 */
-
-#ifdef H5AC_DEBUG
- H5AC_prot_t *prot = NULL;
- static int ncalls = 0;
-
- if (0 == ncalls++) {
- if (H5DEBUG(AC)) {
- fprintf(H5DEBUG(AC), "H5AC: debugging cache (expensive)\n");
- }
- }
-#endif
+ void * thing = NULL;
+ H5AC_t * cache_ptr = NULL;
+ void * ret_value; /* Return value */
FUNC_ENTER_NOAPI(H5AC_protect, NULL)
/* check args */
- assert(f);
- assert(f->shared->cache);
- assert(type);
- assert(type->load);
- assert(type->flush);
- assert(H5F_addr_defined(addr));
-
- /* Get local copy of this information */
- idx = H5AC_HASH(f, addr);
- cache = f->shared->cache;
- info = cache->slot + idx;
-#ifdef H5AC_DEBUG
- prot = cache->prot + idx;
-#endif /* H5AC_DEBUG */
+ HDassert(f);
+ HDassert(f->shared->cache);
+ HDassert(type);
+ HDassert(type->flush);
+ HDassert(type->load);
+ HDassert(H5F_addr_defined(addr));
+
+ cache_ptr = (H5AC_t *)(f->shared->cache);
#ifdef H5_HAVE_PARALLEL
#ifdef H5_HAVE_FPHDF5
- lf = f->shared->lf;
-
- if (H5FD_is_fphdf5_driver(lf)) {
- /*
- * This is the FPHDF5 driver. Grab a lock for this piece of
- * metadata from the SAP. Bail-out quickly if we're unable to do
- * that. In the case of the FPHDF5 driver, the local cache is
- * effectively turned off. We lock the address then load the data
- * from the SAP (or file) directly. We do this because at any one
- * time the data on the SAP will be different than what's on the
- * local process.
- */
- if (H5FP_request_lock(H5FD_fphdf5_file_id(lf), addr,
- rw == H5AC_WRITE ? H5FP_LOCK_WRITE : H5FP_LOCK_READ,
- TRUE, &req_id, &status) < 0) {
-#if 0
- HDfprintf(stdout, "H5AC_protect: Lock failed.\n");
+ /* The following code to support flexible parallel is a direct copy
+ * from the old version of the cache with slight edits. It should
+ * be viewed with as much suspicion as the rest of the FP code.
+ * JRM - 5/26/04
+ */
+ {
+ H5FD_t * lf;
+ unsigned req_id;
+ H5FP_status_t status;
+ H5AC_info_t * info_ptr;
+
+ HDassert(f->shared->lf);
+
+ lf = f->shared->lf;
+
+ if ( H5FD_is_fphdf5_driver(lf) ) {
+
/*
- * FIXME: Check the status variable. If the lock is got
- * by some other process, we can loop and wait or bail
- * out of this function
+ * This is the FPHDF5 driver. Grab a lock for this piece of
+ * metadata from the SAP. Bail-out quickly if we're unable to do
+ * that. In the case of the FPHDF5 driver, the local cache is
+ * effectively turned off. We lock the address then load the data
+ * from the SAP (or file) directly. We do this because at any one
+ * time the data on the SAP will be different than what's on the
+ * local process.
*/
- HDfprintf(stderr, "Couldn't get lock for metadata at address %a\n", addr);
+ if ( H5FP_request_lock(H5FD_fphdf5_file_id(lf), addr,
+ rw == H5AC_WRITE ? H5FP_LOCK_WRITE : H5FP_LOCK_READ,
+ TRUE, &req_id, &status) < 0) {
+#if 0
+ HDfprintf(stdout, "H5AC_protect: Lock failed.\n");
+ /*
+ * FIXME: Check the status variable. If the lock is got
+ * by some other process, we can loop and wait or bail
+ * out of this function
+ */
+ HDfprintf(stderr,
+ "Couldn't get lock for metadata at address %a\n",
+ addr);
#endif /* 0 */
- HGOTO_ERROR(H5E_FPHDF5, H5E_CANTLOCK, NULL, "can't lock data on SAP!")
- }
+ HGOTO_ERROR(H5E_FPHDF5, H5E_CANTLOCK, NULL, \
+ "can't lock data on SAP!")
+ }
+
+ /* Load a thing from the SAP. */
+ if ( NULL == (thing = type->load(f, dxpl_id, addr,
+ udata1, udata2)) ) {
- /* Load a thing from the SAP. */
- if (NULL == (thing = type->load(f, dxpl_id, addr, udata1, udata2))) {
#if 0
- HDfprintf(stdout,
- "%s: Load failed. addr = %a, type->id = %d.\n",
- "H5AC_protect",
- addr,
- (int)(type->id));
+ HDfprintf(stdout,
+ "%s: Load failed. addr = %a, type->id = %d.\n",
+ "H5AC_protect",
+ addr,
+ (int)(type->id));
#endif /* 0 */
- HCOMMON_ERROR(H5E_CACHE, H5E_CANTLOAD, "unable to load object")
+ HCOMMON_ERROR(H5E_CACHE, H5E_CANTLOAD, "unable to load object")
- if (H5FP_request_release_lock(H5FD_fphdf5_file_id(lf), addr,
- TRUE, &req_id, &status) < 0)
- HGOTO_ERROR(H5E_FPHDF5, H5E_CANTUNLOCK, NULL, "can't unlock data on SAP!")
+ if (H5FP_request_release_lock(H5FD_fphdf5_file_id(lf), addr,
+ TRUE, &req_id, &status) < 0)
+ HGOTO_ERROR(H5E_FPHDF5, H5E_CANTUNLOCK, NULL, \
+ "can't unlock data on SAP!")
- HGOTO_DONE(NULL);
- }
+ HGOTO_DONE(NULL);
+ }
+
+ info_ptr = (H5AC_info_t *)thing;
- HGOTO_DONE(thing);
- }
-#endif /* H5_HAVE_FPHDF5 */
+ HDassert(info_ptr->dirty == FALSE);
- /* If MPI based VFD is used, do special parallel I/O actions */
- if (IS_H5FD_MPI(f)) {
- H5AC_info_t **dinfo;
-
- /* Get pointer to new 'held' information */
- dinfo = cache->dslot + idx;
-
- /* Check for 'held' metadata in location & handle it */
- if(*dinfo) {
- /* Sanity checks */
- assert((*dinfo)->dirty);
- assert((*info));
- assert((*info)->dirty == FALSE);
- assert((*dinfo)->addr != (*info)->addr);
-
- /* Is 'held' metadata the metadata we are looking for? */
- if (H5F_addr_eq((*dinfo)->addr, addr)
-#ifdef H5AC_DEBUG
- && (*dinfo)->type==type
-#endif /* H5AC_DEBUG */
- ) {
-#ifndef H5AC_DEBUG
- /* Sanity check that the object in the cache is the correct type */
- assert((*dinfo)->type == type);
-#endif /* H5AC_DEBUG */
-
- /* The object is already cached; simply remove it from the cache. */
- thing = (*dinfo);
- (*dinfo)->type = NULL;
- (*dinfo)->addr = HADDR_UNDEF;
- (*dinfo) = NULL;
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[(*dinfo)->type->id].nhits;
-#endif /* H5AC_DEBUG */
- } /* end if */
- else {
- /*
- * 'held' metadata isn't what we are looking for, but
- * check for 'current' metadata
- */
- if (H5F_addr_eq((*info)->addr, addr)
-#ifdef H5AC_DEBUG
- && (*info)->type==type
-#endif /* H5AC_DEBUG */
- ) {
-#ifndef H5AC_DEBUG
- /* Sanity check that the object in the cache is the correct type */
- assert((*info)->type == type);
-#endif /* H5AC_DEBUG */
-
- /*
- * The object is already cached; remove it from the cache.
- * and bring the 'held' object into the 'regular' information
- */
- thing = (*info);
- (*info)->type = NULL;
- (*info)->addr = HADDR_UNDEF;
- (*info) = (*dinfo);
- (*dinfo) = NULL;
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[(*info)->type->id].nhits;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end else */
- } /* end if */
- } /* end if */
+ info_ptr->addr = addr;
+ info_ptr->type = type;
+ info_ptr->protected = TRUE;
- /* Check if we've already found the object to protect */
- if (thing == NULL) {
-#endif /* H5_HAVE_PARALLEL */
+ if ( (type->size)(f, thing, &(info_ptr->size)) < 0 ) {
- if ((*info) && H5F_addr_eq(addr,(*info)->addr)
-#ifdef H5AC_DEBUG
- && (*info)->type == type
-#endif /* H5AC_DEBUG */
- ) {
-#ifndef H5AC_DEBUG
- /* Sanity check that the object in the cache is the correct type */
- assert((*info)->type == type);
-#endif /* H5AC_DEBUG */
-
- /* The object is already cached; simply remove it from the cache. */
- thing = (*info);
- (*info)->type = NULL;
- (*info)->addr = HADDR_UNDEF;
- (*info) = NULL;
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[(*info)->type->id].nhits;
-#endif /* H5AC_DEBUG */
- } else {
-#ifdef H5AC_DEBUG
- /*
- * Check that the requested thing isn't protected, for protected things
- * can only be modified through the pointer already handed out by the
- * H5AC_protect() function.
- */
- int i;
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, NULL, \
+ "Can't get size of thing")
+ }
- for (i = 0; i < prot->nprots; i++)
- assert(H5F_addr_ne(addr, prot->slot[i]->addr));
-#endif /* H5AC_DEBUG */
+ HDassert(info_ptr->size < H5C_MAX_ENTRY_SIZE);
- /*
- * Load a new thing. If it can't be loaded, then return an error
- * without preempting anything.
- */
- if (NULL == (thing = (type->load)(f, dxpl_id, addr, udata1, udata2)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "unable to load object")
+ info_ptr->next = NULL;
+ info_ptr->prev = NULL;
+ info_ptr->aux_next = NULL;
+ info_ptr->aux_prev = NULL;
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[type->id].nmisses;
-#endif /* H5AC_DEBUG */
+ HGOTO_DONE(thing);
}
-#ifdef H5_HAVE_PARALLEL
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ }
+#endif /* H5_HAVE_FPHDF5 */
+#endif /* H5_HAVE_PARALLEL */
-#ifdef H5AC_DEBUG
- /*
- * Add the protected object to the protect debugging fields of the
- * cache.
- */
- if (prot->nprots >= prot->aprots) {
- size_t na = prot->aprots + 10;
- H5AC_info_t **x;
+ thing = H5C_protect(f,
+ dxpl_id,
+ H5AC_noblock_dxpl_id,
+ cache_ptr,
+ type,
+ addr,
+ udata1,
+ udata2);
- if (NULL == (x = H5MM_realloc(prot->slot, na * sizeof(H5AC_info_t *))))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ if ( thing == NULL ) {
- prot->aprots = (int)na;
- prot->slot = x;
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed.")
}
- prot->slot[prot->nprots]= thing;
- prot->slot[prot->nprots]->type = type;
- prot->slot[prot->nprots]->addr = addr;
- ++prot->nprots;
-#endif /* H5AC_DEBUG */
-
/* Set return value */
ret_value = thing;
done:
- if (ret_value)
- ++cache->nprots;
FUNC_LEAVE_NOAPI(ret_value)
-}
+
+} /* H5AC_protect() */
/*-------------------------------------------------------------------------
* Function: H5AC_unprotect
*
- * Purpose: This function should be called to undo the effect of
+ * Purpose: Undo an H5AC_protect() call -- specifically, mark the
+ * entry as unprotected, remove it from the protected list,
+ * and give it back to the replacement policy.
+ *
+ * The TYPE and ADDR arguments must be the same as those in
+ * the corresponding call to H5AC_protect() and the THING
+ * argument must be the value returned by that call to
+ * H5AC_protect().
+ *
+ * If the deleted flag is TRUE, simply remove the target entry
+ * from the cache, clear it, and free it without writing it to
+ * disk.
+ *
+ * This version of the function is a complete re-write to
+ * use the new metadata cache. While there isn't all that
+ * much difference between the old and new Purpose sections,
+ * the original version is given below.
+ *
+ * Original purpose section:
+ *
+ * This function should be called to undo the effect of
* H5AC_protect(). The TYPE and ADDR arguments should be the
* same as the corresponding call to H5AC_protect() and the
* THING argument should be the value returned by H5AC_protect().
@@ -1509,212 +1061,127 @@ done:
* Bill Wendling, 2003-09-18
* If this is an FPHDF5 driver and the data is dirty,
* perform a "flush" that writes the data to the SAP.
+ *
+ * John Mainzer 5/19/04
+ * Complete re-write for the new metadata cache.
+ *
+ * JRM - 6/7/04
+ * Abstracted the guts of the function to H5C_unprotect()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_unprotect().
+ *
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
- void *thing, hbool_t deleted)
+H5AC_unprotect(H5F_t *f,
+ hid_t dxpl_id,
+ const H5AC_class_t *type,
+ haddr_t addr,
+ void *thing,
+ hbool_t deleted)
{
- unsigned idx;
- H5AC_flush_func_t flush;
- H5AC_t *cache = NULL;
- H5AC_info_t **info;
+ herr_t result;
herr_t ret_value = SUCCEED; /* Return value */
-#ifdef H5_HAVE_FPHDF5
- H5FD_t *lf;
- unsigned req_id;
- H5FP_status_t status;
-#endif /* H5_HAVE_FPHDF5 */
-
+ H5AC_info_t * info_ptr;
+ H5AC_t * cache_ptr = NULL;
FUNC_ENTER_NOAPI(H5AC_unprotect, FAIL)
- /* check args */
- assert(f);
- assert(f->shared->cache);
- assert(type);
- assert(type->flush);
- assert(H5F_addr_defined(addr));
- assert(thing);
-
- /* Get local copy of this information */
- idx = H5AC_HASH(f, addr);
- cache = f->shared->cache;
- info = cache->slot + idx;
-
-#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_FPHDF5)
- lf = f->shared->lf;
-
- if (H5FD_is_fphdf5_driver(lf)) {
- /*
- * FIXME: If the metadata is *really* deleted at this point
- * (deleted == TRUE), we need to send a request to the SAP
- * telling it to remove that bit of metadata from its cache.
- */
- if (H5FP_request_release_lock(H5FD_fphdf5_file_id(lf), addr,
- TRUE, &req_id, &status) < 0)
- HGOTO_ERROR(H5E_FPHDF5, H5E_CANTUNLOCK, FAIL, "can't unlock data on SAP!")
-
- /* Flush a thing to the SAP */
- if (thing) {
- if (((H5AC_info_t *)thing)->dirty) {
- if (type->flush(f, dxpl_id, FALSE, addr, thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush object")
-
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[type_id].nflushes;
-#endif /* H5AC_DEBUG */
- }
+ HDassert(f);
+ HDassert(f->shared->cache);
+ HDassert(type);
+ HDassert(type->clear);
+ HDassert(type->flush);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(thing);
- /* Always clear/delete the object from the local cache */
- if (type->clear(f, thing, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free object")
- }
+ cache_ptr = (H5AC_t *)(f->shared->cache);
+ info_ptr = (H5AC_info_t *)thing;
- /* Exit now. The FPHDF5 stuff is finished. */
- HGOTO_DONE(SUCCEED);
- }
-#endif /* H5_HAVE_PARALLEL && H5_HAVE_FPHDF5 */
+ HDassert( info_ptr->addr == addr );
+ HDassert( info_ptr->type == type );
-#ifdef H5AC_DEBUG
- /*
- * Remove the object's protect data to indicate that it is no longer
- * protected.
+#ifdef H5_HAVE_PARALLEL
+#ifdef H5_HAVE_FPHDF5
+ /* The following code to support flexible parallel is a direct copy
+ * from the old version of the cache with slight edits. It should
+ * be viewed with as much suspicion as the rest of the FP code.
+ * JRM - 5/26/04
*/
{
- int found = FALSE, i;
- H5AC_prot_t *prot = cache->prot + idx;
-
- for (i = 0; i < prot->nprots && !found; ++i) {
- if (H5F_addr_eq(addr, prot->slot[i]->addr)) {
- assert(prot->slot[i]->type == type);
- HDmemmove(prot->slot + i, prot->slot + i + 1,
- ((prot->nprots - i) - 1) * sizeof(H5AC_info_t *));
- --prot->nprots;
- found = TRUE;
- }
- }
+ H5FD_t * lf;
+ unsigned req_id;
+ H5FP_status_t status;
- assert(found);
- }
-#endif /* H5AC_DEBUG */
+ HDassert(f->shared->lf);
- /* Don't restore deleted objects to the cache */
- if (!deleted) {
-#ifdef H5_HAVE_PARALLEL
- /* If MPI based VFD is used, do special parallel I/O actions */
- if (IS_H5FD_MPI(f)) {
- H5AC_info_t **dinfo;
- H5P_genplist_t *dxpl; /* Dataset transfer property list */
- H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id;
-#endif /* H5AC_DEBUG */
-
- /* Get the dataset transfer property list */
- if (NULL == (dxpl = H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
-
- /* Get the transfer mode property */
- if (H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve xfer mode")
-
- /* Get pointer to 'held' information */
- dinfo = cache->dslot + idx;
-
- /* Sanity check transfer mode */
- if (xfer_mode == H5FD_MPIO_COLLECTIVE) {
- /* Check for dirty metadata */
- if (*dinfo) {
- H5AC_dest_func_t dest;
-
- /* Various sanity checks */
- assert((*dinfo)->dirty);
- assert((*info) != NULL);
- assert((*info)->dirty == 0);
-#ifdef H5AC_DEBUG
- type_id = (*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
-
- /* Destroy 'current' information */
- dest = (*info)->type->dest;
-
- if ((dest)(f, (*info)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free cached object")
-
- /* Restore 'held' information back to 'current' information */
- (*info) = (*dinfo);
-
- /* Clear 'held' information */
- (*dinfo) = NULL;
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[type_id].nrestores;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end if */
- else {
- /* Sanity check */
- assert((*dinfo) == NULL);
- assert(xfer_mode == H5FD_MPIO_INDEPENDENT);
-
- /* Make certain there will be no write of dirty metadata */
- if ((*info) && (*info)->dirty) {
- /* Sanity check new item */
- assert(((H5AC_info_t*)thing)->dirty == 0);
-
- /* 'Hold' the current metadata for later */
- (*dinfo) = (*info);
-
- /* Reset the 'current' metadata, so it doesn't get flushed */
- (*info) = NULL;
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[(*dinfo)->type->id].nholds;
-#endif /* H5AC_DEBUG */
- } /* end if */
- } /* end else */
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ lf = f->shared->lf;
+
+ if ( H5FD_is_fphdf5_driver(lf) ) {
+
+ HDassert( info_ptr->protected );
+
+ info_ptr->protected = FALSE;
+
+ /*
+ * FIXME: If the metadata is *really* deleted at this point
+ * (deleted == TRUE), we need to send a request to the SAP
+ * telling it to remove that bit of metadata from its cache.
+ */
+ if ( H5FP_request_release_lock(H5FD_fphdf5_file_id(lf), addr,
+ TRUE, &req_id, &status) < 0 )
+ HGOTO_ERROR(H5E_FPHDF5, H5E_CANTUNLOCK, FAIL, \
+ "can't unlock data on SAP!")
+
+ /* Flush a thing to the SAP */
+ if ( thing ) {
- /*
- * Flush any object already in the cache at that location. It had
- * better not be another copy of the protected object.
- */
- if (*info) {
-#ifdef H5AC_DEBUG
- H5AC_subid_t type_id = (*info)->type->id; /* Remember this for later */
-#endif /* H5AC_DEBUG */
+ if ( ((H5AC_info_t *)thing)->dirty ) {
- assert(H5F_addr_ne((*info)->addr, addr));
- flush = (*info)->type->flush;
+ if ( type->flush(f, dxpl_id, FALSE, addr, thing) < 0 ) {
- if ((flush)(f, dxpl_id, TRUE, (*info)->addr, (*info)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush object")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush object")
+ }
+ }
-#ifdef H5AC_DEBUG
- ++cache->diagnostics[type_id].nflushes;
-#endif /* H5AC_DEBUG */
+ /* Always clear/delete the object from the local cache */
+ if ( type->clear(f, thing, TRUE) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, \
+ "unable to free object")
+
+ }
+ }
+
+ /* Exit now. The FPHDF5 stuff is finished. */
+ HGOTO_DONE(SUCCEED);
}
+ }
+#endif /* H5_HAVE_FPHDF5 */
+#endif /* H5_HAVE_PARALLEL */
- /* Insert the object back into the cache; it is no longer protected. */
- (*info) = thing;
- (*info)->type = type;
- (*info)->addr = addr;
- } /* end if */
- else {
- /* Destroy previously cached thing */
- if ((type->clear)(f, thing, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free object")
- } /* end else */
+ result = H5C_unprotect(f,
+ dxpl_id,
+ H5AC_noblock_dxpl_id,
+ cache_ptr,
+ type,
+ addr,
+ thing,
+ deleted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "H5C_unprotect() failed.")
+ }
done:
- if (ret_value != FAIL)
- /* Decrement the number of protected items outstanding */
- --cache->nprots;
FUNC_LEAVE_NOAPI(ret_value)
-}
-#ifdef H5AC_DEBUG
+} /* H5AC_unprotect() */
+
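
Taken together, the protect/unprotect protocol described in the two Purpose sections above looks like this from the client side (sketch only; H5AC_BT, udata1/udata2, and the edit in the middle are placeholders for real client code):

    /* Sketch only: the protect/unprotect round trip. */
    void *thing;

    if ( NULL == (thing = H5AC_protect(f, dxpl_id, H5AC_BT, addr,
                                       udata1, udata2, H5AC_WRITE)) )
        return FAIL;

    /* ... cast 'thing' to the client's type and modify it; while protected
     *     the entry can be neither flushed nor evicted, nor protected a
     *     second time ...
     */

    if ( H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, thing, FALSE) < 0 )
        return FAIL;    /* deleted == FALSE: the entry is handed back to   */
                        /* the replacement policy rather than discarded    */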
/*-------------------------------------------------------------------------
* Function: H5AC_stats
@@ -1727,75 +1194,122 @@ done:
* Thursday, October 30, 1997
*
* Modifications:
+ * John Mainzer 5/19/04
+ * Re-write to support the new metadata cache.
+ *
+ * JRM - 6/7/04
+ * Abstracted the guts of the function to H5C_stats()
+ * in H5C.c, and then re-wrote the function as a wrapper for
+ * H5C_stats().
*
*-------------------------------------------------------------------------
*/
+
herr_t
H5AC_stats(H5F_t UNUSED *f)
{
- H5AC_subid_t i;
- char s[32], ascii[32];
- H5AC_t *cache = f->shared->cache;
- double miss_rate;
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5AC_t * cache_ptr;
FUNC_ENTER_NOAPI(H5AC_stats, FAIL)
- if (H5DEBUG(AC)) {
- fprintf(H5DEBUG(AC), "H5AC: meta data cache statistics for file %s\n",
- f->name);
- fprintf(H5DEBUG(AC), " %-18s %8s %8s %8s %8s+%-8s\n",
- "Layer", "Hits", "Misses", "MissRate", "Inits", "Flushes");
- fprintf(H5DEBUG(AC), " %-18s %8s %8s %8s %8s-%-8s\n",
- "-----", "----", "------", "--------", "-----", "-------");
-
- for (i = H5AC_BT_ID; i < H5AC_NTYPES; i++) {
-
- switch (i) {
- case H5AC_BT_ID:
- HDstrcpy(s, "B-tree nodes");
- break;
- case H5AC_SNODE_ID:
- HDstrcpy(s, "symbol table nodes");
- break;
- case H5AC_LHEAP_ID:
- HDstrcpy (s, "local heaps");
- break;
- case H5AC_GHEAP_ID:
- HDstrcpy (s, "global heaps");
- break;
- case H5AC_OHDR_ID:
- HDstrcpy(s, "object headers");
- break;
- default:
- sprintf(s, "unknown id %d", i);
- }
-
- if (cache->diagnostics[i].nhits>0 ||
- cache->diagnostics[i].nmisses>0) {
- miss_rate = 100.0 * cache->diagnostics[i].nmisses /
- (cache->diagnostics[i].nhits+
- cache->diagnostics[i].nmisses);
- } else {
- miss_rate = 0.0;
- }
-
- if (miss_rate > 100) {
- sprintf(ascii, "%7d%%", (int) (miss_rate + 0.5));
- } else {
- sprintf(ascii, "%7.2f%%", miss_rate);
- }
- fprintf(H5DEBUG(AC), " %-18s %8u %8u %7s %8u%+-9ld\n", s,
- cache->diagnostics[i].nhits,
- cache->diagnostics[i].nmisses,
- ascii,
- cache->diagnostics[i].ninits,
- ((long)(cache->diagnostics[i].nflushes) -
- (long)(cache->diagnostics[i].ninits)));
- }
+ HDassert(f);
+ HDassert(f->shared->cache);
+
+ cache_ptr = (H5AC_t *)(f->shared->cache);
+
+ H5C_stats(cache_ptr, f->name, FALSE); /* at present, this can't fail */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC_stats() */
+
+
+/*************************************************************************/
+/**************************** Private Functions: *************************/
+/*************************************************************************/
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_check_if_write_permitted
+ *
+ * Purpose: Determine if a write is permitted under the current
+ * circumstances, and set *write_permitted_ptr accordingly.
+ * As a general rule it is, but when we are running in parallel
+ * mode with collective I/O, we must ensure that a read cannot
+ * cause a write.
+ *
+ * In the event of failure, the value of *write_permitted_ptr
+ * is undefined.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/15/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC_check_if_write_permitted(H5F_t *f,
+ hid_t dxpl_id,
+ hbool_t * write_permitted_ptr)
+#else /* H5_HAVE_PARALLEL */
+static herr_t
+H5AC_check_if_write_permitted(H5F_t UNUSED * f,
+ hid_t UNUSED dxpl_id,
+ hbool_t * write_permitted_ptr)
+#endif /* H5_HAVE_PARALLEL */
+{
+ hbool_t write_permitted = TRUE;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_check_if_write_permitted, FAIL)
+
+#ifdef H5_HAVE_PARALLEL
+
+ if ( IS_H5FD_MPI(f) ) {
+
+ H5P_genplist_t *dxpl; /* Dataset transfer property list */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode property value */
+
+ /* Get the dataset transfer property list */
+ if ( NULL == (dxpl = H5I_object(dxpl_id)) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, \
+                        "not a dataset transfer property list")
+
+ }
+
+ /* Get the transfer mode property */
+ if( H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0 ) {
+
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, \
+ "can't retrieve xfer mode")
+
+ }
+
+ if ( xfer_mode == H5FD_MPIO_INDEPENDENT ) {
+
+ write_permitted = FALSE;
+
+ } else {
+
+ HDassert(xfer_mode == H5FD_MPIO_COLLECTIVE );
+
+ }
}
+#endif /* H5_HAVE_PARALLEL */
+
+ *write_permitted_ptr = write_permitted;
+
done:
+
FUNC_LEAVE_NOAPI(ret_value)
-}
-#endif /* H5AC_DEBUG */
+
+} /* H5AC_check_if_write_permitted() */
+
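A minimal wiring sketch, not part of the patch: the hook above only matters once it is registered with the generic cache via H5C_create(). The table name and size limits below are hypothetical; only H5C_create(), H5AC_check_if_write_permitted(), and the H5AC_*_ID ordering (see H5ACprivate.h below) come from the patch itself.

    /* Hedged sketch -- illustrative only, inside a function such as
     * H5AC_create().  The size limits and the table name are made up.
     */
    static const char * H5AC_entry_type_names[H5AC_NTYPES] =
    {
        "B-tree nodes",
        "symbol table nodes",
        "local heaps",
        "global heaps",
        "object headers"
    };

    H5C_t * cache_ptr;

    cache_ptr = H5C_create((size_t)(4 * 1024 * 1024), /* max_cache_size (illustrative) */
                           (size_t)(1 * 1024 * 1024), /* min_clean_size (illustrative) */
                           (H5AC_NTYPES - 1),         /* max_type_id                   */
                           &H5AC_entry_type_names,    /* type name table               */
                           H5AC_check_if_write_permitted);

    if ( cache_ptr == NULL )
        HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't create cache")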
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index f12f397..6acc3e3 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -21,7 +21,9 @@
* Purpose: Constants and typedefs available to the rest of the
* library.
*
- * Modifications:
+ * Modifications: JRM - 6/4/04
+ * Complete re-write for a new caching algorithm
+ * located in H5C.c
*
*-------------------------------------------------------------------------
*/
@@ -29,21 +31,20 @@
#ifndef _H5ACprivate_H
#define _H5ACprivate_H
-#include "H5ACpublic.h" /*public prototypes */
+#include "H5ACpublic.h" /*public prototypes */
/* Private headers needed by this header */
#include "H5private.h" /* Generic Functions */
#include "H5Fprivate.h" /* File access */
+#include "H5Cprivate.h" /* cache */
-/*
- * Feature: Define H5AC_DEBUG on the compiler command line if you want to
- * debug H5AC_protect() and H5AC_unprotect() by insuring that
- * nothing accesses protected objects. NDEBUG must not be defined
- * in order for this to have any effect.
- */
-#ifdef NDEBUG
-# undef H5AC_DEBUG
-#endif
+
+#define H5AC_BT_ID 0 /*B-tree nodes */
+#define H5AC_SNODE_ID 1 /*symbol table nodes */
+#define H5AC_LHEAP_ID 2 /*local heap */
+#define H5AC_GHEAP_ID 3 /*global heap */
+#define H5AC_OHDR_ID 4 /*object header */
+#define H5AC_NTYPES 5
/*
* Class methods pertaining to caching. Each type of cached object will
@@ -64,28 +65,38 @@
* DEST: Just frees memory allocated by the LOAD method.
*
* CLEAR: Just marks object as non-dirty.
+ *
+ * SIZE: Report the size (on disk) of the specified cache object.
+ * Note that the space allocated on disk may not be contiguous.
+ */
+
+typedef H5C_load_func_t H5AC_load_func_t;
+typedef H5C_flush_func_t H5AC_flush_func_t;
+typedef H5C_dest_func_t H5AC_dest_func_t;
+typedef H5C_clear_func_t H5AC_clear_func_t;
+typedef H5C_size_func_t H5AC_size_func_t;
+
+typedef H5C_class_t H5AC_class_t;
+
+
+/* The H5AC_NSLOTS #define is now obsolete, as the metadata cache no longer
+ * uses slots. However, I am leaving it in for now to avoid modifying the
+ * interface between the metadata cache and the rest of HDF5. It should
+ * be removed when we get to dealing with the size_hint parameter in
+ * H5AC_create().
+ * JRM - 5/20/04
+ *
+ * Old comment on H5AC_NSLOTS follows:
+ *
+ * A cache has a certain number of entries. Objects are mapped into a
+ * cache entry by hashing the object's file address. Each file has its
+ * own cache, an array of slots.
*/
-typedef enum H5AC_subid_t {
- H5AC_BT_ID = 0, /*B-tree nodes */
- H5AC_SNODE_ID = 1, /*symbol table nodes */
- H5AC_LHEAP_ID = 2, /*local heap */
- H5AC_GHEAP_ID = 3, /*global heap */
- H5AC_OHDR_ID = 4, /*object header */
- H5AC_NTYPES = 5 /*THIS MUST BE LAST! */
-} H5AC_subid_t;
-
-typedef void *(*H5AC_load_func_t)(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, void *udata2);
-typedef herr_t (*H5AC_flush_func_t)(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing);
-typedef herr_t (*H5AC_dest_func_t)(H5F_t *f, void *thing);
-typedef herr_t (*H5AC_clear_func_t)(H5F_t *f, void *thing, hbool_t dest);
-
-typedef struct H5AC_class_t {
- H5AC_subid_t id;
- H5AC_load_func_t load;
- H5AC_flush_func_t flush;
- H5AC_dest_func_t dest;
- H5AC_clear_func_t clear;
-} H5AC_class_t;
+#define H5AC_NSLOTS 10330 /* The library "likes" this number... */
+
+
+typedef H5C_cache_entry_t H5AC_info_t;
+
/*===----------------------------------------------------------------------===
* Protect Types
@@ -95,27 +106,15 @@ typedef struct H5AC_class_t {
* type of operation you're planning on doing to the metadata. The
* Flexible Parallel HDF5 locking can then act accordingly.
*/
+
typedef enum H5AC_protect_t {
H5AC_WRITE, /* Protect object for writing */
H5AC_READ /* Protect object for reading */
} H5AC_protect_t;
-/*
- * A cache has a certain number of entries. Objects are mapped into a
- * cache entry by hashing the object's file address. Each file has its
- * own cache, an array of slots.
- */
-#define H5AC_NSLOTS 10330 /* The library "likes" this number... */
-typedef struct H5AC_info_t {
- const H5AC_class_t *type; /*type of object stored here */
- haddr_t addr; /*file address for object */
- hbool_t dirty; /* 'Dirty' flag for cached object */
-} H5AC_info_t;
-typedef H5AC_info_t *H5AC_info_ptr_t; /* Typedef for free lists */
-
-/* Typedef for metadata cache (defined in H5AC.c) */
-typedef struct H5AC_t H5AC_t;
+/* Typedef for metadata cache (defined in H5C.c) */
+typedef H5C_t H5AC_t;
/* Metadata specific properties for FAPL */
/* (Only used for parallel I/O) */
@@ -158,9 +157,9 @@ H5_DLL herr_t H5AC_flush(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, hadd
H5_DLL herr_t H5AC_rename(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
haddr_t old_addr, haddr_t new_addr);
H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id);
-#ifdef H5AC_DEBUG
+
H5_DLL herr_t H5AC_stats(H5F_t *f);
-#endif /* H5AC_DEBUG */
+
#endif /* !_H5ACprivate_H */
diff --git a/src/H5B.c b/src/H5B.c
index a6a4a32..d3aa046 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -156,6 +156,7 @@ static H5B_t *H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_type,
static herr_t H5B_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B_t *b);
static herr_t H5B_dest(H5F_t *f, H5B_t *b);
static herr_t H5B_clear(H5F_t *f, H5B_t *b, hbool_t destroy);
+static herr_t H5B_compute_size(H5F_t *f, H5B_t *bt, size_t *size_ptr);
/* H5B inherits cache-like properties from H5AC */
static const H5AC_class_t H5AC_BT[1] = {{
@@ -164,6 +165,7 @@ static const H5AC_class_t H5AC_BT[1] = {{
(H5AC_flush_func_t)H5B_flush,
(H5AC_dest_func_t)H5B_dest,
(H5AC_clear_func_t)H5B_clear,
+ (H5AC_size_func_t)H5B_compute_size,
}};
/* Declare a free list to manage the page information */
@@ -630,6 +632,56 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5B_compute_size
+ *
+ * Purpose: Compute the size in bytes of the specified instance of
+ * H5B_t on disk, and return it in *len_ptr. On failure,
+ * the value of *len_ptr is undefined.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/13/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B_compute_size(H5F_t *f, H5B_t *bt, size_t *size_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ size_t size;
+
+ FUNC_ENTER_NOAPI(H5B_compute_size, FAIL)
+
+ /* check arguments */
+ HDassert(f);
+ HDassert(bt);
+ HDassert(bt->type);
+ HDassert(size_ptr);
+
+ size = H5B_nodesize(f, bt->type, NULL, bt->sizeof_rkey);
+
+ if ( size == 0 ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, \
+ "H5B_nodesize() failed");
+
+ } else {
+
+ *size_ptr = size;
+
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5B_compute_size() */
+
+
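For contrast with the B-tree case above, the size-callback contract is small enough to show in isolation. The sketch below is hypothetical -- my_thing_compute_size() and its fixed 32-byte image exist only for illustration -- and it assumes the same (file, thing, size_ptr) shape that H5B_compute_size() uses; the cache only requires that *size_ptr be set on success and that a negative value be returned on failure.

    /* Hedged sketch -- not part of the patch.  A hypothetical client whose
     * on-disk image has a fixed size could satisfy the SIZE callback with:
     */
    static herr_t
    my_thing_compute_size(H5F_t UNUSED *f, void UNUSED *thing, size_t *size_ptr)
    {
        HDassert(size_ptr);

        *size_ptr = (size_t)32;     /* hypothetical fixed on-disk size */

        return(SUCCEED);
    }

    /* Registration would mirror H5AC_BT above, i.e. the callback is cast to
     * (H5AC_size_func_t) and placed last in the client's H5AC_class_t
     * initializer.
     */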
+/*-------------------------------------------------------------------------
* Function: H5B_find
*
* Purpose: Locate the specified information in a B-tree and return
diff --git a/src/H5C.c b/src/H5C.c
new file mode 100644
index 0000000..c16f95f
--- /dev/null
+++ b/src/H5C.c
@@ -0,0 +1,3490 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5C.c
+ * June 1 2004
+ * John Mainzer
+ *
+ * Purpose: Functions in this file implement a generic cache for
+ * things which exist on disk, and which may be
+ * unambiguously referenced by their disk addresses.
+ *
+ * The code in this module was initially written in
+ * support of a complete re-write of the metadata cache
+ *              in H5AC.c. However, other uses for the cache code
+ * suggested themselves, and thus this file was created
+ * in an attempt to support re-use.
+ *
+ * For a detailed overview of the cache, please see the
+ * header comment for H5C_t in this file.
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/**************************************************************************
+ *
+ * To Do:
+ *
+ * Code Changes:
+ *
+ * - Remove extra functionality in H5C_flush_single_entry()?
+ *
+ * - Change protect/unprotect to lock/unlock.
+ *
+ * - Change the way the dirty flag is set. Probably pass it in
+ * as a parameter in unprotect & insert.
+ *
+ * - Size should also be passed in as a parameter in insert and
+ * unprotect -- or some other way should be found to advise the
+ * cache of changes in entry size.
+ *
+ * - Flush entries in increasing address order in
+ * H5C_make_space_in_cache().
+ *
+ * - Also in H5C_make_space_in_cache(), use high and low water marks
+ * to reduce the number of I/O calls.
+ *
+ * - When flushing, attempt to combine contiguous entries to reduce
+ * I/O overhead. Can't do this just yet as some entries are not
+ * contiguous. Do this in parallel only or in serial as well?
+ *
+ * - Create MPI type for dirty objects when flushing in parallel.
+ *
+ * Tests:
+ *
+ * - Trim execution time.
+ *
+ * - Add random tests.
+ *
+ **************************************************************************/
+
+#define H5F_PACKAGE /*suppress error about including H5Fpkg */
+
+/* Pablo information */
+/* (Put before include files to avoid problems with inline functions) */
+#define PABLO_MASK H5C_mask
+
+#include "H5private.h" /* Generic Functions */
+#include "H5Cprivate.h" /* Cache */
+#include "H5Dprivate.h" /* Dataset functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
+#include "H5Pprivate.h" /* Property lists */
+#include "H5TBprivate.h" /* Threaded, Balanced, Binary Trees */
+
+
+/* Interface initialization -- disabled in this case */
+#if 0
+static int interface_initialize_g = 0;
+#endif
+#define INTERFACE_INIT NULL
+
+
+/****************************************************************************
+ *
+ * We maintain doubly linked lists of instances of H5C_cache_entry_t for a
+ * variety of reasons -- protected list, LRU list, and the clean and dirty
+ * LRU lists at present. The following macros support linking and unlinking
+ * of instances of H5C_cache_entry_t by both their regular and auxiliary next
+ * and previous pointers.
+ *
+ * The size and length fields are also maintained.
+ *
+ * Note that the relevant pair of prev and next pointers are presumed to be
+ * NULL on entry in the insertion macros.
+ *
+ * Finally, observe that the sanity checking macros evaluate to the empty
+ * string when H5C_DO_SANITY_CHECKS is FALSE. They also contain calls
+ * to the HGOTO_ERROR macro, which may not be appropriate in all cases.
+ * If so, we will need versions of the insertion and deletion macros which
+ * do not reference the sanity checking macros.
+ * JRM - 5/5/04
+ *
+ ****************************************************************************/
+
+#if H5C_DO_SANITY_CHECKS
+
+#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+if ( ( (head_ptr) == NULL ) || \
+ ( (tail_ptr) == NULL ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (len) <= 0 ) || \
+ ( (Size) < (entry_ptr)->size ) || \
+ ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) || \
+ ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
+ ( ( (len) == 1 ) && \
+ ( ! ( ( (head_ptr) == (entry_ptr) ) && \
+ ( (tail_ptr) == (entry_ptr) ) && \
+ ( (entry_ptr)->next == NULL ) && \
+ ( (entry_ptr)->prev == NULL ) && \
+ ( (Size) == (entry_ptr)->size ) \
+ ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre remove SC failed") \
+}
+
+#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size) \
+if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( (Size) < 0 ) || \
+ ( ( (len) == 1 ) && \
+       ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||               \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL sanity check failed") \
+}
+
+#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+if ( ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->next != NULL ) || \
+ ( (entry_ptr)->prev != NULL ) || \
+ ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre insert SC failed") \
+}
+
+#else /* H5C_DO_SANITY_CHECKS */
+
+#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size)
+#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size)
+#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size)
+
+#endif /* H5C_DO_SANITY_CHECKS */
+
+
+#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (tail_ptr)->next = (entry_ptr); \
+ (entry_ptr)->prev = (tail_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += (entry_ptr)->size;
+
+#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (head_ptr)->prev = (entry_ptr); \
+ (entry_ptr)->next = (head_ptr); \
+ (head_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += entry_ptr->size;
+
+#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ { \
+ if ( (head_ptr) == (entry_ptr) ) \
+ { \
+ (head_ptr) = (entry_ptr)->next; \
+ if ( (head_ptr) != NULL ) \
+ { \
+ (head_ptr)->prev = NULL; \
+ } \
+ } \
+ else \
+ { \
+ (entry_ptr)->prev->next = (entry_ptr)->next; \
+ } \
+ if ( (tail_ptr) == (entry_ptr) ) \
+ { \
+ (tail_ptr) = (entry_ptr)->prev; \
+ if ( (tail_ptr) != NULL ) \
+ { \
+ (tail_ptr)->next = NULL; \
+ } \
+ } \
+ else \
+ { \
+ (entry_ptr)->next->prev = (entry_ptr)->prev; \
+ } \
+ entry_ptr->next = NULL; \
+ entry_ptr->prev = NULL; \
+ (len)--; \
+ (Size) -= entry_ptr->size; \
+ }
+
+
+#if H5C_DO_SANITY_CHECKS
+
+#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+if ( ( (head_ptr) == NULL ) || \
+ ( (tail_ptr) == NULL ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (len) <= 0 ) || \
+ ( (Size) < (entry_ptr)->size ) || \
+ ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
+ ( ( (entry_ptr)->aux_prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
+ ( ( (len) == 1 ) && \
+ ( ! ( ( (head_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
+ ( (entry_ptr)->aux_next == NULL ) && \
+ ( (entry_ptr)->aux_prev == NULL ) && \
+ ( (Size) == (entry_ptr)->size ) \
+ ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "aux DLL pre remove SC failed") \
+}
+
+#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size) \
+if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( (Size) < 0 ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "AUX DLL sanity check failed") \
+}
+
+#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+if ( ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->aux_next != NULL ) || \
+ ( (entry_ptr)->aux_prev != NULL ) || \
+ ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( (len) < 0 ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "AUX DLL pre insert SC failed") \
+}
+
+#else /* H5C_DO_SANITY_CHECKS */
+
+#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size)
+#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size)
+#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size)
+
+#endif /* H5C_DO_SANITY_CHECKS */
+
+
+#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (tail_ptr)->aux_next = (entry_ptr); \
+ (entry_ptr)->aux_prev = (tail_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += entry_ptr->size;
+
+#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (head_ptr)->aux_prev = (entry_ptr); \
+ (entry_ptr)->aux_next = (head_ptr); \
+ (head_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += entry_ptr->size;
+
+#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size) \
+ { \
+ if ( (head_ptr) == (entry_ptr) ) \
+ { \
+ (head_ptr) = (entry_ptr)->aux_next; \
+ if ( (head_ptr) != NULL ) \
+ { \
+ (head_ptr)->aux_prev = NULL; \
+ } \
+ } \
+ else \
+ { \
+ (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next; \
+ } \
+ if ( (tail_ptr) == (entry_ptr) ) \
+ { \
+ (tail_ptr) = (entry_ptr)->aux_prev; \
+ if ( (tail_ptr) != NULL ) \
+ { \
+ (tail_ptr)->aux_next = NULL; \
+ } \
+ } \
+ else \
+ { \
+ (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev; \
+ } \
+ entry_ptr->aux_next = NULL; \
+ entry_ptr->aux_prev = NULL; \
+ (len)--; \
+ (Size) -= entry_ptr->size; \
+ }
+
+
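As a hedged illustration of how the list macros above fit together, the fragment below moves an entry from the clean LRU to the dirty LRU while keeping the length and size bookkeeping consistent. The replacement-policy routines declared later in this file (H5C_update_rp_for_*) do this kind of work; the exact code here is a sketch, not a copy of them, and it assumes it runs inside a function with a done: label, since the sanity-checking variants of the macros use HGOTO_ERROR.

    /* entry_ptr has just been dirtied: drop it from the clean LRU ... */
    H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->cLRU_head_ptr,
                        cache_ptr->cLRU_tail_ptr, cache_ptr->cLRU_list_len,
                        cache_ptr->cLRU_list_size)

    /* ... and make it the most recently used entry on the dirty LRU. */
    H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->dLRU_head_ptr,
                         cache_ptr->dLRU_tail_ptr, cache_ptr->dLRU_list_len,
                         cache_ptr->dLRU_list_size)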
+/***********************************************************************
+ *
+ * Stats collection macros
+ *
+ * The following macros must handle stats collection when this collection
+ * is enabled, and evaluate to the empty string when it is not.
+ *
+ ***********************************************************************/
+
+#if H5C_COLLECT_CACHE_STATS
+
+#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
+ (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size;
+
+#define H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr) \
+ (((cache_ptr)->renames)[(entry_ptr)->type->id])++;
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+
+#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \
+ (entry_ptr)->accesses = 0; \
+ (entry_ptr)->clears = 0; \
+ (entry_ptr)->flushes = 0;
+
+#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
+ (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \
+ ((entry_ptr)->clears)++;
+
+#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
+ (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \
+ ((entry_ptr)->flushes)++;
+
+#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
+ (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
+ if ( (entry_ptr)->accesses > \
+ ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] \
+ = (entry_ptr)->accesses; \
+ } \
+ if ( (entry_ptr)->accesses < \
+ ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] \
+ = (entry_ptr)->accesses; \
+ } \
+ if ( (entry_ptr)->clears > \
+ ((cache_ptr)->max_clears)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_clears)[(entry_ptr)->type->id] \
+ = (entry_ptr)->clears; \
+ } \
+ if ( (entry_ptr)->flushes > \
+ ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] ) { \
+ ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] \
+ = (entry_ptr)->flushes; \
+ } \
+
+#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
+ if ( hit ) \
+ ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
+ else \
+ ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
+ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
+ if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
+ ((entry_ptr)->accesses)++;
+
+#else /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
+
+#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
+ (((cache_ptr)->clears)[(entry_ptr)->type->id])++;
+
+#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
+ (((cache_ptr)->flushes)[(entry_ptr)->type->id])++;
+
+#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
+ (((cache_ptr)->evictions)[(entry_ptr)->type->id])++;
+
+#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
+ if ( hit ) \
+ ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
+ else \
+ ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
+ (cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
+ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
+ if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size;
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+#else /* H5C_COLLECT_CACHE_STATS */
+
+#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
+#define H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
+
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+/****************************************************************************
+ *
+ * structure H5C_t
+ *
+ * Catchall structure for all variables specific to an instance of the cache.
+ *
+ * While the individual fields of the structure are discussed below, the
+ * following overview may be helpful.
+ *
+ * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
+ * the entry's disk address. While the H5TB_TREE is less efficient than
+ * a hash table, it keeps the entries in address-sorted order. As flushes
+ * in parallel mode are more efficient if they are issued in increasing
+ * address order, this is a significant benefit. Also the H5TB_TREE code
+ * was readily available, which reduced development time.
+ *
+ * While the cache was designed with multiple replacement policies in mind,
+ * at present only a modified form of LRU is supported.
+ *
+ * JRM - 4/26/04
+ *
+ * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. This
+ * field is used to validate pointers to instances of H5C_t.
+ *
+ * max_type_id: Integer field containing the maximum type id number assigned
+ * to a type of entry in the cache. All type ids from 0 to
+ * max_type_id inclusive must be defined. The names of the
+ * types are stored in the type_name_table discussed below, and
+ * indexed by the ids.
+ *
+ * type_name_table_ptr: Pointer to an array of pointers to char of length
+ * max_type_id + 1. The strings pointed to by the entries
+ * in the array are the names of the entry types associated
+ * with the indexing type IDs.
+ *
+ * max_cache_size: Nominal maximum number of bytes that may be stored in the
+ * cache. This value should be viewed as a soft limit, as the
+ * cache can exceed this value under the following circumstances:
+ *
+ * a) All entries in the cache are protected, and the cache is
+ * asked to insert a new entry. In this case the new entry
+ * will be created. If this causes the cache to exceed
+ * max_cache_size, it will do so. The cache will attempt
+ * to reduce its size as entries are unprotected.
+ *
+ * b) When running in parallel mode, the cache may not be
+ * permitted to flush a dirty entry in response to a read.
+ * If there are no clean entries available to evict, the
+ * cache will exceed its maximum size. Again the cache
+ * will attempt to reduce its size to the max_cache_size
+ * limit on the next cache write.
+ *
+ * min_clean_size: Nominal minimum number of clean bytes in the cache.
+ * The cache attempts to maintain this number of bytes of
+ * clean data so as to avoid case b) above. Again, this is
+ * a soft limit.
+ *
+ *
+ * In addition to the call back functions required for each entry, the
+ * cache requires the following call back functions for this instance of
+ * the cache as a whole:
+ *
+ * check_write_permitted: In certain applications, the cache may not
+ *		be allowed to write to disk at certain times. If specified,
+ * the check_write_permitted function is used to determine if
+ * a write is permissible at any given point in time.
+ *
+ * If no such function is specified (i.e. this field is NULL),
+ *		the cache will presume that writes are always permissible.
+ *
+ *
+ * The cache requires an index to facilitate searching for entries. The
+ * following fields support that index.
+ *
+ * index_len: Number of entries currently in the threaded binary B-tree
+ * used to index the cache.
+ *
+ * index_size: Number of bytes of cache entries currently stored in the
+ * threaded binary B-tree used to index the cache.
+ *
+ *		This value should not be mistaken for the footprint of the
+ * cache in memory. The average cache entry is small, and
+ * the cache has a considerable overhead. Multiplying the
+ * index_size by two should yield a conservative estimate
+ * of the cache's memory footprint.
+ *
+ * index_tree_ptr: pointer to the instance of H5TB_TREE used to index
+ * the cache. I use an instance of H5TB_TREE instead of
+ * a more conventional hash table based design for two
+ * reasons:
+ *
+ * a) the code is already present and tested.
+ *
+ * b) the H5TB_TREE makes it easy to check for adjacent
+ * cache entries so that writes can be combined and
+ * thus optimized.
+ *
+ * If time permitted, a more efficient index could be
+ * constructed. However, this should do for now. If the
+ * additional lookup overhead proves excessive, I will
+ * write specialized code.
+ *
+ *
+ * When a cache entry is protected, it must be removed from the LRU
+ * list(s) as it cannot be either flushed or evicted until it is unprotected.
+ * The following fields are used to implement the protected list (pl).
+ *
+ * pl_len: Number of entries currently residing on the protected list.
+ *
+ * pl_size: Number of bytes of cache entries currently residing on the
+ * protected list.
+ *
+ * pl_head_ptr: Pointer to the head of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * The cache must have a replacement policy, and the fields supporting this
+ * policy must be accessible from this structure.
+ *
+ * While there has been interest in several replacement policies for
+ * this cache, the initial development schedule is tight. Thus I have
+ * elected to support only a modified LRU policy for the first cut.
+ *
+ * To further simplify matters, I have simply included the fields needed
+ * by the modified LRU in this structure. When and if we add support for
+ * other policies, it will probably be easiest to just add the necessary
+ * fields to this structure as well -- we only create one instance of this
+ * structure per file, so the overhead is not excessive.
+ *
+ *
+ * Fields supporting the modified LRU policy:
+ *
+ * See most any OS text for a discussion of the LRU replacement policy.
+ *
+ * When operating in parallel mode, we must ensure that a read does not
+ * cause a write. If it does, the process will hang, as the write will
+ * be collective and the other processes will not know to participate.
+ *
+ * To deal with this issue, I have modified the usual LRU policy by adding
+ * clean and dirty LRU lists to the usual LRU list.
+ *
+ * The clean LRU list is simply the regular LRU list with all dirty cache
+ * entries removed.
+ *
+ * Similarly, the dirty LRU list is the regular LRU list with all the clean
+ * cache entries removed.
+ *
+ * When reading in parallel mode, we evict from the clean LRU list only.
+ * This implies that we must try to ensure that the clean LRU list is
+ * reasonably well stocked at all times.
+ *
+ * We attempt to do this by trying to flush enough entries on each write
+ * to keep the cLRU_list_size >= min_clean_size.
+ *
+ * Even if we start with a completely clean cache, a sequence of protects
+ * without unprotects can empty the clean LRU list. In this case, the
+ * cache must grow temporarily. At the next write, we will attempt to
+ * evict enough entries to reduce index_size to less than max_cache_size.
+ * While this will usually be possible, all bets are off if enough entries
+ * are protected.
+ *
+ * Discussions of the individual fields used by the modified LRU replacement
+ * policy follow:
+ *
+ * LRU_list_len: Number of cache entries currently on the LRU list.
+ *
+ * Observe that LRU_list_len + pl_len must always equal
+ * index_len.
+ *
+ * LRU_list_size: Number of bytes of cache entries currently residing on the
+ * LRU list.
+ *
+ * Observe that LRU_list_size + pl_size must always equal
+ * index_size.
+ *
+ * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_list_len: Number of cache entries currently on the clean LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * cLRU_list_size: Number of bytes of cache entries currently residing on
+ * the clean LRU list.
+ *
+ * Observe that cLRU_list_size + dLRU_list_size must always
+ * equal LRU_list_size.
+ *
+ * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * dLRU_list_size:  Number of bytes of cache entries currently residing on
+ *		the dirty LRU list.
+ *
+ *		Observe that cLRU_list_size + dLRU_list_size must always
+ *		equal LRU_list_size.
+ *
+ * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * Statistics collection fields:
+ *
+ * When enabled, these fields are used to collect statistics as described
+ * below. The first set are collected only when H5C_COLLECT_CACHE_STATS
+ * is true.
+ *
+ * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been in cache when requested in
+ * the current epoch.
+ *
+ * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has not been in cache when
+ * requested in the current epoch.
+ *
+ * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been inserted into the
+ * cache in the current epoch.
+ *
+ * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been cleared in the current
+ * epoch.
+ *
+ * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been written to disk in the
+ * current epoch.
+ *
+ * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been evicted from the cache in
+ * the current epoch.
+ *
+ * renames: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been renamed in the current
+ * epoch.
+ *
+ * max_index_len: Largest value attained by the index_len field in the
+ * current epoch.
+ *
+ * max_index_size: Largest value attained by the index_size field in the
+ * current epoch.
+ *
+ * max_pl_len: Largest value attained by the pl_len field in the
+ * current epoch.
+ *
+ * max_pl_size: Largest value attained by the pl_size field in the
+ * current epoch.
+ *
+ * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
+ * and H5C_COLLECT_CACHE_ENTRY_STATS are true.
+ *
+ * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the minimum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been cleared
+ * in the current epoch.
+ *
+ * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * flushed in the current epoch.
+ *
+ *
+ * Fields supporting testing:
+ *
+ * For test purposes, it is useful to turn off some asserts and sanity
+ * checks. The following flags support this.
+ *
+ * skip_file_checks: Boolean flag used to skip sanity checks on file
+ * parameters passed to the cache. In the test bed, there
+ * is no reason to have a file open, as the cache proper
+ * just passes these parameters through without using them.
+ *
+ * When this flag is set, all sanity checks on the file
+ * parameters are skipped. The field defaults to FALSE.
+ *
+ * skip_dxpl_id_checks: Boolean flag used to skip sanity checks on the
+ * dxpl_id parameters passed to the cache. These are not
+ * used directly by the cache, so skipping the checks
+ * simplifies the test bed.
+ *
+ * When this flag is set, all sanity checks on the dxpl_id
+ * parameters are skipped. The field defaults to FALSE.
+ *
+ ****************************************************************************/
+
+#define H5C__H5C_T_MAGIC 0x005CAC0E
+#define H5C__MAX_NUM_TYPE_IDS 9
+
+struct H5C_t
+{
+ uint32_t magic;
+
+ int32_t max_type_id;
+ const char * (* type_name_table_ptr)[];
+
+ size_t max_cache_size;
+ size_t min_clean_size;
+
+ H5C_write_permitted_func_t check_write_permitted;
+
+ int32_t index_len;
+ size_t index_size;
+ H5TB_TREE * index_tree_ptr;
+
+ int32_t pl_len;
+ size_t pl_size;
+ H5C_cache_entry_t * pl_head_ptr;
+ H5C_cache_entry_t * pl_tail_ptr;
+
+ int32_t LRU_list_len;
+ size_t LRU_list_size;
+ H5C_cache_entry_t * LRU_head_ptr;
+ H5C_cache_entry_t * LRU_tail_ptr;
+
+ int32_t cLRU_list_len;
+ size_t cLRU_list_size;
+ H5C_cache_entry_t * cLRU_head_ptr;
+ H5C_cache_entry_t * cLRU_tail_ptr;
+
+ int32_t dLRU_list_len;
+ size_t dLRU_list_size;
+ H5C_cache_entry_t * dLRU_head_ptr;
+ H5C_cache_entry_t * dLRU_tail_ptr;
+
+#if H5C_COLLECT_CACHE_STATS
+
+ /* stats fields */
+ int64_t hits[H5C__MAX_NUM_TYPE_IDS];
+ int64_t misses[H5C__MAX_NUM_TYPE_IDS];
+ int64_t insertions[H5C__MAX_NUM_TYPE_IDS];
+ int64_t clears[H5C__MAX_NUM_TYPE_IDS];
+ int64_t flushes[H5C__MAX_NUM_TYPE_IDS];
+ int64_t evictions[H5C__MAX_NUM_TYPE_IDS];
+ int64_t renames[H5C__MAX_NUM_TYPE_IDS];
+
+ int32_t max_index_len;
+ size_t max_index_size;
+
+ int32_t max_pl_len;
+ size_t max_pl_size;
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+
+ int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS];
+ int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS];
+ int32_t max_clears[H5C__MAX_NUM_TYPE_IDS];
+ int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS];
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ hbool_t skip_file_checks;
+ hbool_t skip_dxpl_id_checks;
+
+};
+
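The invariants stated in the header comment above (LRU_list_len + pl_len == index_len, and so on) are easy to restate as code. The helper below is purely illustrative -- the patch does not define it -- but it makes the intended relationships between the index, the protected list, and the LRU lists concrete.

    /* Hedged sketch -- not part of the patch. */
    static hbool_t
    H5C_lists_are_consistent(const H5C_t * cache_ptr)
    {
        return (hbool_t)
            ( ( cache_ptr->LRU_list_len  + cache_ptr->pl_len
                == cache_ptr->index_len ) &&
              ( cache_ptr->LRU_list_size + cache_ptr->pl_size
                == cache_ptr->index_size ) &&
              ( cache_ptr->cLRU_list_len  + cache_ptr->dLRU_list_len
                == cache_ptr->LRU_list_len ) &&
              ( cache_ptr->cLRU_list_size + cache_ptr->dLRU_list_size
                == cache_ptr->LRU_list_size ) );
    }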
+
+/*
+ * Private file-scope variables.
+ */
+
+/* Declare a free list to manage the H5C_t struct */
+H5FL_DEFINE_STATIC(H5C_t);
+
+/*
+ * Private file-scope function declarations:
+ */
+
+static herr_t H5C_flush_single_entry(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type_ptr,
+ haddr_t addr,
+ unsigned flags,
+ H5TB_NODE * tgt_node_ptr,
+ hbool_t * first_flush_ptr,
+ hbool_t remove_entry_from_tree_on_destroy);
+
+static herr_t H5C_insert_entry_in_tree(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr);
+
+static void * H5C_load_entry(H5F_t * f,
+ hid_t dxpl_id,
+ const H5C_class_t * type,
+ haddr_t addr,
+ const void * udata1,
+ void * udata2,
+ hbool_t skip_file_checks);
+
+static herr_t H5C_make_space_in_cache(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ size_t space_needed,
+ hbool_t write_permitted);
+
+static herr_t H5C_remove_entry_from_tree(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr,
+ H5TB_NODE * node_ptr);
+
+static herr_t H5C_update_rp_for_eviction(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr);
+
+static herr_t H5C_update_rp_for_flush(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr);
+
+static herr_t H5C_update_rp_for_insertion(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr);
+
+static herr_t H5C_update_rp_for_protect(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr);
+
+static herr_t H5C_update_rp_for_rename(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr);
+
+static herr_t H5C_update_rp_for_unprotect(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr);
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_create
+ *
+ * Purpose: Allocate, initialize, and return the address of a new
+ * instance of H5C_t.
+ *
+ * In general, the max_cache_size parameter must be positive,
+ * and the min_clean_size parameter must lie in the closed
+ * interval [0, max_cache_size].
+ *
+ * The check_write_permitted parameter must either be NULL,
+ * or point to a function of type H5C_write_permitted_func_t.
+ * If it is NULL, the cache will presume that writes are
+ * always permitted.
+ *
+ * Return: Success: Pointer to the new instance.
+ *
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+H5C_t *
+H5C_create(size_t max_cache_size,
+ size_t min_clean_size,
+ int max_type_id,
+ const char * (* type_name_table_ptr)[],
+ H5C_write_permitted_func_t check_write_permitted)
+{
+ int i;
+ H5C_t * cache_ptr = NULL;
+ H5C_t * ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_create, NULL)
+
+ HDassert( max_cache_size > 0 );
+ HDassert( min_clean_size <= max_cache_size );
+
+ HDassert( max_type_id >= 0 );
+ HDassert( max_type_id < H5C__MAX_NUM_TYPE_IDS );
+ HDassert( type_name_table_ptr );
+
+ for ( i = 0; i <= max_type_id; i++ ) {
+
+ HDassert( (*type_name_table_ptr)[i] );
+ HDassert( strlen((* type_name_table_ptr)[i]) > 0 );
+ }
+
+
+ if ( NULL == (cache_ptr = H5FL_CALLOC(H5C_t)) ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, \
+ "memory allocation failed")
+ }
+
+ if ( (cache_ptr->index_tree_ptr = H5TB_fast_dmake(H5TB_FAST_HADDR_COMPARE))
+ == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create TBBT.")
+ }
+
+ /* If we get this far, we should succeed. Go ahead and initialize all
+ * the fields.
+ */
+
+ cache_ptr->magic = H5C__H5C_T_MAGIC;
+
+ cache_ptr->max_type_id = max_type_id;
+ cache_ptr->type_name_table_ptr = type_name_table_ptr;
+
+ cache_ptr->max_cache_size = max_cache_size;
+ cache_ptr->min_clean_size = min_clean_size;
+
+ cache_ptr->check_write_permitted = check_write_permitted;
+
+ cache_ptr->index_len = 0;
+ cache_ptr->index_size = (size_t)0;
+
+ cache_ptr->pl_len = 0;
+ cache_ptr->pl_size = (size_t)0;
+ cache_ptr->pl_head_ptr = NULL;
+ cache_ptr->pl_tail_ptr = NULL;
+
+ cache_ptr->LRU_list_len = 0;
+ cache_ptr->LRU_list_size = (size_t)0;
+ cache_ptr->LRU_head_ptr = NULL;
+ cache_ptr->LRU_tail_ptr = NULL;
+
+ cache_ptr->cLRU_list_len = 0;
+ cache_ptr->cLRU_list_size = (size_t)0;
+ cache_ptr->cLRU_head_ptr = NULL;
+ cache_ptr->cLRU_tail_ptr = NULL;
+
+ cache_ptr->dLRU_list_len = 0;
+ cache_ptr->dLRU_list_size = (size_t)0;
+ cache_ptr->dLRU_head_ptr = NULL;
+ cache_ptr->dLRU_tail_ptr = NULL;
+
+ H5C_stats__reset(cache_ptr);
+
+ cache_ptr->skip_file_checks = FALSE;
+ cache_ptr->skip_dxpl_id_checks = FALSE;
+
+ /* Set return value */
+ ret_value = cache_ptr;
+
+done:
+
+    if ( ret_value == NULL ) {
+
+ if ( cache_ptr != NULL ) {
+
+ if ( cache_ptr->index_tree_ptr != NULL ) {
+
+ /* the index tree should be empty, so we can pass in
+ * NULL for the fd & fk parameters.
+ */
+ H5TB_dfree(cache_ptr->index_tree_ptr, NULL, NULL);
+ }
+
+ cache_ptr->magic = 0;
+ H5FL_FREE(H5C_t, cache_ptr);
+ cache_ptr = NULL;
+
+ } /* end if */
+
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_dest
+ *
+ * Purpose: Flush all data to disk and destroy the cache.
+ *
+ *		This function fails if any objects are protected since the
+ * resulting file might not be consistent.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the destroy (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the metadata
+ * cache, but may not be needed elsewhere. If so, just use the
+ * same dxpl_id for both parameters.
+ *
+ * Note that *cache_ptr has been freed upon successful return.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_dest(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr)
+{
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_dest, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || f );
+
+ if ( H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
+ cache_ptr, H5F_FLUSH_INVALIDATE) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ }
+
+ if ( cache_ptr->index_tree_ptr != NULL ) {
+
+ /* the index tree should be empty, so we can pass in
+ * NULL for the fd & fk parameters.
+ */
+ H5TB_dfree(cache_ptr->index_tree_ptr, NULL, NULL);
+ cache_ptr->index_tree_ptr = NULL;
+ }
+
+ cache_ptr->magic = 0;
+
+ H5FL_FREE(H5C_t, cache_ptr);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_dest() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_dest_empty
+ *
+ * Purpose: Destroy an empty cache.
+ *
+ * This function fails if the cache is not empty on entry.
+ *
+ * Note that *cache_ptr has been freed upon successful return.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_dest_empty(H5C_t * cache_ptr)
+{
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_dest_empty, FAIL)
+
+ /* This would normally be an assert, but we need to use an HGOTO_ERROR
+ * call to shut up the compiler.
+ */
+ if ( ( ! cache_ptr ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( cache_ptr->index_len != 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Bad cache_ptr or non-empty cache on entry.")
+ }
+
+
+ if ( cache_ptr->index_tree_ptr != NULL ) {
+
+ /* the index tree should be empty, so we can pass in
+ * NULL for the fd & fk parameters.
+ */
+ H5TB_dfree(cache_ptr->index_tree_ptr, NULL, NULL);
+ cache_ptr->index_tree_ptr = NULL;
+ }
+
+ cache_ptr->magic = 0;
+
+ H5FL_FREE(H5C_t, cache_ptr);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_dest_empty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_cache
+ *
+ * Purpose: Flush (and possibly destroy) the entries contained in the
+ * specified cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be flushed. However
+ * all unprotected entries should be flushed before the
+ * function returns failure.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the flush (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the metadata
+ * cache, but may not be needed elsewhere. If so, just use the
+ * same dxpl_id for both parameters.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_flush_cache(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ unsigned flags)
+{
+ herr_t status;
+ herr_t ret_value = SUCCEED;
+ hbool_t destroy = ( (flags & H5F_FLUSH_INVALIDATE) != 0 );
+ hbool_t first_flush = TRUE;
+ int32_t protected_entries = 0;
+ H5TB_NODE * node_ptr;
+ H5C_cache_entry_t * entry_ptr;
+#if H5C_DO_SANITY_CHECKS
+ int32_t actual_index_len = 0;
+ size_t actual_index_size = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ FUNC_ENTER_NOAPI(H5C_flush_cache, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || f );
+
+
+ if ( cache_ptr->index_tree_ptr->root == NULL ) {
+
+ node_ptr = NULL;
+ HDassert( cache_ptr->index_len == 0 );
+ HDassert( cache_ptr->index_size == 0 );
+
+ } else {
+
+ node_ptr = H5TB_first(cache_ptr->index_tree_ptr->root);
+ }
+
+ while ( node_ptr != NULL )
+ {
+ entry_ptr = (H5C_cache_entry_t *)(node_ptr->data);
+ HDassert( entry_ptr != NULL );
+
+#if H5C_DO_SANITY_CHECKS
+ actual_index_len++;
+ actual_index_size += entry_ptr->size;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if ( entry_ptr->protected ) {
+
+            /* we have major problems -- but let's flush everything
+ * we can before we flag an error.
+ */
+ protected_entries++;
+
+ } else {
+
+ status = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ NULL,
+ entry_ptr->addr,
+ flags,
+ node_ptr,
+ &first_flush,
+ FALSE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are toast so
+ * just scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't flush entry.")
+ }
+ }
+
+ node_ptr = H5TB_next(node_ptr);
+
+ } /* while */
+
+ HDassert( protected_entries == cache_ptr->pl_len );
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert( actual_index_len == cache_ptr->index_len );
+ HDassert( actual_index_size == cache_ptr->index_size );
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if ( destroy ) {
+
+ /* don't pass in any key or data free functions, as all
+ * unprotected entries should have already been destroyed.
+ */
+ H5TB_free(&(cache_ptr->index_tree_ptr->root), NULL, NULL);
+ cache_ptr->index_len = 0;
+ cache_ptr->index_size = 0;
+
+ if ( protected_entries > 0 )
+ {
+ /* the caller asked us to flush and destroy a cache that
+ * contains one or more protected entries. Since we can't
+ * flush protected entries, we haven't destroyed them either.
+ * Since they are all on the protected list, just re-insert
+ * them into the tree before we flag an error.
+ */
+ entry_ptr = cache_ptr->pl_head_ptr;
+
+ while ( entry_ptr != NULL )
+ {
+ if ( H5C_insert_entry_in_tree(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't re-insert protected entry.")
+ }
+ entry_ptr = entry_ptr->next;
+ }
+ }
+ }
+
+ if ( protected_entries > 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "cache has protected items")
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_flush_cache() */
+
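A brief, hedged usage note (the call sites below are hypothetical): passing 0 for the flags argument requests a plain flush that leaves entries resident, while H5F_FLUSH_INVALIDATE -- the flag H5C_dest() passes above -- also destroys everything that can be destroyed. Reusing one dxpl_id for both the primary and secondary parameters is fine when the first-write distinction is not needed, as the header comment notes.

    if ( H5C_flush_cache(f, dxpl_id, dxpl_id, cache_ptr, 0) < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")

    if ( H5C_flush_cache(f, dxpl_id, dxpl_id, cache_ptr,
                         H5F_FLUSH_INVALIDATE) < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
                    "unable to flush and invalidate cache")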
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_insert_entry
+ *
+ * Purpose: Adds the specified thing to the cache. The thing need not
+ * exist on disk yet, but it must have an address and disk
+ * space reserved.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the insertion (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the
+ * metadata cache, but may not be needed elsewhere. If so,
+ * just use the same dxpl_id for both parameters.
+ *
+ * The primary_dxpl_id is the dxpl_id passed to the
+ * check_write_permitted function if such a function has been
+ * provided.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C_insert_entry(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t addr,
+ void * thing)
+{
+ herr_t result;
+ herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t write_permitted = TRUE;
+ H5C_cache_entry_t * entry_ptr;
+ H5TB_NODE * node_ptr = NULL;
+
+ FUNC_ENTER_NOAPI(H5C_insert_entry, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || f );
+ HDassert( type );
+ HDassert( type->flush );
+ HDassert( type->size );
+ HDassert( H5F_addr_defined(addr) );
+ HDassert( thing );
+
+ entry_ptr = (H5C_cache_entry_t *)thing;
+
+ entry_ptr->addr = addr;
+ entry_ptr->type = type;
+
+ if ( (type->size)(f, thing, &(entry_ptr->size)) < 0 ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, \
+ "Can't get size of thing")
+ }
+
+ HDassert( entry_ptr->size < H5C_MAX_ENTRY_SIZE );
+
+ entry_ptr->next = NULL;
+ entry_ptr->prev = NULL;
+ entry_ptr->aux_next = NULL;
+ entry_ptr->aux_prev = NULL;
+
+ H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
+
+ if ( (cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size ){
+
+ size_t space_needed;
+
+ if ( cache_ptr->check_write_permitted != NULL ) {
+
+ result = (cache_ptr->check_write_permitted)(f,
+ primary_dxpl_id,
+ &write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "Can't get write_permitted")
+ }
+ }
+
+ HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE );
+
+ space_needed = (cache_ptr->index_size + entry_ptr->size) -
+ cache_ptr->max_cache_size;
+
+ /* It would be nice to be able to do a tight sanity check on
+ * space_needed here, but it is hard to assign an upper bound on
+ * its value other than the value assigned to it.
+ *
+ * This fact springs from several features of the cache:
+ *
+ * First, it is possible for the cache to grow without
+ * bound as long as entries are protected and not unprotected.
+ *
+ * Second, when writes are not permitted it is also possible
+ * for the cache to grow without bound.
+ *
+ * Finally, we don't check to see if the cache is oversized
+ * at the end of an unprotect. As a result, it is possible
+ * to have a vastly oversized cache with no protected entries
+ * as long as all the protects precede the unprotects.
+ *
+ * Since items 1 and 2 are not changing any time soon, I see
+ * no point in worrying about the third.
+ *
+ * In any case, I hope this explains why there is no sanity
+ * check on space_needed here.
+ */
+
+ result = H5C_make_space_in_cache(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ space_needed,
+ write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "H5C_make_space_in_cache failed.")
+ }
+ }
+
+ /* verify that the new entry isn't already in the tree -- scream
+ * and die if it is.
+ */
+ node_ptr = H5TB_dfind(cache_ptr->index_tree_ptr, entry_ptr, NULL);
+
+ if ( node_ptr != NULL ) {
+
+ if ( node_ptr->key == ((void *)(entry_ptr)) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "entry already in cache.")
+
+ } else {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "duplicate entry in cache.")
+
+ }
+ }
+
+ /* we don't initialize the protected field until here as it is
+ * possible that the entry is already in the cache, and already
+ * protected. If it is, we don't want to make things worse by
+ * marking it unprotected.
+ */
+
+ entry_ptr->protected = FALSE;
+
+ if ( H5C_insert_entry_in_tree(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "Can't insert entry in tree.")
+
+ }
+
+ if ( H5C_update_rp_for_insertion(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "Can't update replacement policy for insertion.")
+
+ }
+
+ H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_insert_entry() */
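+
+/* Illustrative sketch of an insertion -- not part of the library. The
+ * object, its newly allocated file address, and the class pointer
+ * (my_class_ptr) are hypothetical. The cache casts the supplied thing to
+ * H5C_cache_entry_t, so the client structure is expected to begin with
+ * such an entry:
+ *
+ *        if ( H5C_insert_entry(f, primary_dxpl_id, secondary_dxpl_id,
+ *                              cache_ptr, my_class_ptr, new_addr,
+ *                              (void *)new_obj_ptr) < 0 )
+ *            ...handle the error...
+ */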
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_rename_entry
+ *
+ * Purpose: Use this function to notify the cache that an entry's
+ * file address changed.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C_rename_entry(H5F_t * f,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t old_addr,
+ haddr_t new_addr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5TB_NODE * new_node_ptr = NULL;
+ H5TB_NODE * old_node_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr;
+ H5C_cache_entry_t search_target;
+
+ FUNC_ENTER_NOAPI(H5C_rename_entry, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || f );
+ HDassert( type );
+ HDassert( H5F_addr_defined(old_addr) );
+ HDassert( H5F_addr_defined(new_addr) );
+ HDassert( H5F_addr_ne(old_addr, new_addr) );
+
+ search_target.addr = old_addr;
+ old_node_ptr = H5TB_dfind(cache_ptr->index_tree_ptr,
+ (void *)(&search_target),
+ NULL);
+
+ if ( ( old_node_ptr == NULL ) ||
+ ( ((H5C_cache_entry_t *)(old_node_ptr->key))->type != type ) ) {
+
+ /* the old item doesn't exist in the cache, so we are done. */
+ HGOTO_DONE(SUCCEED)
+
+ } else {
+
+ entry_ptr = old_node_ptr->key;
+ HDassert( entry_ptr->addr == old_addr );
+ HDassert( entry_ptr->type == type );
+ HDassert( !(entry_ptr->protected) );
+ }
+
+ search_target.addr = new_addr;
+ new_node_ptr = H5TB_dfind(cache_ptr->index_tree_ptr,
+ (void *)&search_target,
+ NULL);
+
+ if ( new_node_ptr != NULL ) { /* we are hosed */
+
+ if ( ((H5C_cache_entry_t *)(new_node_ptr->key))->type == type ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ "Target already renamed & reinserted???.")
+
+ } else {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ "New address already in use?.")
+
+ }
+ }
+
+ /* If we get this far, we have work to do. Remove *entry_ptr from
+ * the tree, change its address to the new address, and then re-insert.
+ * Update the replacement policy for a hit to avoid an eviction before
+ * the renamed entry is touched. Update stats for a rename.
+ *
+ * Note that we do not check the size of the cache, or evict anything.
+ * Since this is a simple rename, cache size should be unaffected.
+ */
+
+ if ( H5C_remove_entry_from_tree(cache_ptr, entry_ptr, old_node_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ "Can't remove entry from tree.")
+ }
+
+ entry_ptr->addr = new_addr;
+
+ if ( H5C_insert_entry_in_tree(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ "Can't re-insert entry from tree.")
+ }
+
+ if ( H5C_update_rp_for_rename(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ "Can't can't update replacement policy for a hit.")
+ }
+
+ H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr)
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_rename_entry() */
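+
+/* Illustrative sketch -- not part of the library. When a cached object is
+ * relocated on disk (say a node is moved during a B-tree split), the
+ * client notifies the cache of the new address. Names are hypothetical:
+ *
+ *        if ( H5C_rename_entry(f, cache_ptr, my_class_ptr,
+ *                              old_node_addr, new_node_addr) < 0 )
+ *            ...handle the error...
+ *
+ * If no entry of the given type exists at old_node_addr, the call
+ * succeeds without doing anything.
+ */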
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_protect
+ *
+ * Purpose: If the target entry is not in the cache, load it. If
+ * necessary, attempt to evict one or more entries to keep
+ * the cache within its maximum size.
+ *
+ * Mark the target entry as protected, and return its address
+ * to the caller. The caller must call H5C_unprotect() when
+ * finished with the entry.
+ *
+ * While it is protected, the entry may not be either evicted
+ * or flushed -- nor may it be accessed by another call to
+ * H5C_protect. Any attempt to do so will result in a failure.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the insertion (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the
+ * metadata cache, but may not be needed elsewhere. If so,
+ * just use the same dxpl_id for both parameters.
+ *
+ * All reads are performed with the primary_dxpl_id.
+ *
+ * Similarly, the primary_dxpl_id is passed to the
+ * check_write_permitted function if it is called.
+ *
+ * Return: Success: Ptr to the desired entry
+ *
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer - 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void *
+H5C_protect(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t addr,
+ const void * udata1,
+ void * udata2)
+{
+ hbool_t hit = FALSE;
+ void * thing = NULL;
+ H5C_cache_entry_t * entry_ptr;
+ H5TB_NODE * node_ptr = NULL;
+ void * ret_value; /* Return value */
+ H5C_cache_entry_t search_target;
+
+ FUNC_ENTER_NOAPI(H5C_protect, NULL)
+
+ /* check args */
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || f );
+ HDassert( type );
+ HDassert( type->flush );
+ HDassert( type->load );
+ HDassert( H5F_addr_defined(addr) );
+
+ /* first check to see if the target is in cache */
+ search_target.addr = addr;
+ node_ptr = H5TB_dfind(cache_ptr->index_tree_ptr,
+ (void *)(&search_target),
+ NULL);
+
+ if ( node_ptr != NULL ) {
+
+ hit = TRUE;
+ thing = node_ptr->key;
+ entry_ptr = (H5C_cache_entry_t *)thing;
+
+ } else { /* must try to load the entry from disk. */
+
+ hit = FALSE;
+ thing = H5C_load_entry(f, primary_dxpl_id, type, addr, udata1, udata2,
+ cache_ptr->skip_file_checks);
+
+ if ( thing == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
+ }
+
+ entry_ptr = (H5C_cache_entry_t *)thing;
+
+ /* try to free up some space if necessary */
+ if ( (cache_ptr->index_size + entry_ptr->size) >
+ cache_ptr->max_cache_size ) {
+
+ hbool_t write_permitted = TRUE;
+ herr_t result;
+ size_t space_needed;
+
+ if ( cache_ptr->check_write_permitted != NULL ) {
+
+ result = (cache_ptr->check_write_permitted)(f,
+ primary_dxpl_id,
+ &write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Can't get write_permitted")
+ }
+ }
+
+ HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE );
+
+ space_needed = (cache_ptr->index_size + entry_ptr->size) -
+ cache_ptr->max_cache_size;
+
+ /* It would be nice to be able to do a tight sanity check on
+ * space_needed here, but it is hard to assign an upper bound on
+ * its value other than the value assigned to it.
+ *
+ * This fact springs from several features of the cache:
+ *
+ * First, it is possible for the cache to grow without
+ * bound as long as entries are protected and not unprotected.
+ *
+ * Second, when writes are not permitted it is also possible
+ * for the cache to grow without bound.
+ *
+ * Finally, we don't check to see if the cache is oversized
+ * at the end of an unprotect. As a result, it is possible
+ * to have a vastly oversized cache with no protected entries
+ * as long as all the protects precede the unprotects.
+ *
+ * Since items 1 and 2 are not changing any time soon, I see
+ * no point in worrying about the third.
+ *
+ * In any case, I hope this explains why there is no sanity
+ * check on space_needed here.
+ */
+
+ result = H5C_make_space_in_cache(f, primary_dxpl_id,
+ secondary_dxpl_id, cache_ptr,
+ space_needed, write_permitted);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "H5C_make_space_in_cache failed.")
+ }
+ }
+
+ /* insert the entry in the tree and in the protected list. */
+ if ( H5C_insert_entry_in_tree(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Can't insert newly loaded entry in tree.")
+ }
+
+ /* insert the entry in the data structures used by the replacement
+ * policy. We are just going to take it out again when we update
+ * the replacement policy for a protect, but this simplifies the
+ * code. If we do this often enough, we may want to optimize this.
+ */
+
+ if ( H5C_update_rp_for_insertion(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Can't update replacement policy for newly loaded entry.")
+
+ }
+ }
+
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->type == type );
+
+ if ( entry_ptr->protected ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Target already protected?!?.")
+ }
+
+ if ( H5C_update_rp_for_protect(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
+ "Can't update replacement policy for protect")
+ }
+
+ entry_ptr->protected = TRUE;
+
+ ret_value = thing;
+
+ H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_protect() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_unprotect
+ *
+ * Purpose: Undo an H5C_protect() call -- specifically, mark the
+ * entry as unprotected, remove it from the protected list,
+ * and give it back to the replacement policy.
+ *
+ * The TYPE and ADDR arguments must be the same as those in
+ * the corresponding call to H5C_protect() and the THING
+ * argument must be the value returned by that call to
+ * H5C_protect().
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the unprotect (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). Since an unprotect cannot
+ * occasion a write at present, all this is moot for now.
+ * However, things change, and in any case,
+ * H5C_flush_single_entry() needs primary_dxpl_id and
+ * secondary_dxpl_id in its parameter list.
+ *
+ * The function can't cause a read either, so the dxpl_id
+ * parameters are moot in this case as well.
+ *
+ * If the deleted flag is TRUE, simply remove the target entry
+ * from the cache, clear it, and free it without writing it to
+ * disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unprotect(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t addr,
+ void * thing,
+ hbool_t deleted)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5C_cache_entry_t * entry_ptr;
+
+ FUNC_ENTER_NOAPI(H5C_unprotect, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || f );
+ HDassert( type );
+ HDassert( type->clear );
+ HDassert( type->flush );
+ HDassert( H5F_addr_defined(addr) );
+ HDassert( thing );
+
+ entry_ptr = (H5C_cache_entry_t *)thing;
+
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->type == type );
+
+ if ( ! (entry_ptr->protected) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Entry already unprotected??")
+ }
+
+ if ( H5C_update_rp_for_unprotect(cache_ptr, entry_ptr) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Can't update replacement policy for unprotect.")
+ }
+
+ entry_ptr->protected = FALSE;
+
+ /* this implementation of the "deleted" option is a bit inefficient, as
+ * we re-insert the entry to be deleted into the replacement policy
+ * data structures, only to remove it again. Depending on how often
+ * we do this, we may want to optimize a bit.
+ *
+ * On the other hand, this implementation is reasonably clean, and
+ * makes good use of existing code.
+ * JRM - 5/19/04
+ */
+ if ( deleted ) {
+
+ /* the following first flush flag will never be used as we are
+ * calling H5C_flush_single_entry with both the H5F_FLUSH_CLEAR_ONLY
+ * and H5F_FLUSH_INVALIDATE flags. However, it is needed for the
+ * function call.
+ */
+ hbool_t dummy_first_flush = TRUE;
+ H5TB_NODE * node_ptr;
+
+ /* verify that the target entry is in the tree. */
+
+ node_ptr = H5TB_dfind(cache_ptr->index_tree_ptr, entry_ptr, NULL);
+
+ if ( ( node_ptr == NULL ) || ( node_ptr->key != thing ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "thing not in tree?!?.")
+ }
+
+ if ( H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ type,
+ addr,
+ (H5F_FLUSH_CLEAR_ONLY|H5F_FLUSH_INVALIDATE),
+ node_ptr,
+ &dummy_first_flush,
+ TRUE) < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "thing not in tree?!?.")
+ }
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_unprotect() */
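+
+/* A minimal protect/unprotect sketch -- illustrative only. Every
+ * successful H5C_protect() must be paired with an H5C_unprotect() on the
+ * same type / address / thing. The names below are placeholders:
+ *
+ *        my_object_t * obj_ptr;
+ *
+ *        obj_ptr = (my_object_t *)H5C_protect(f, primary_dxpl_id,
+ *                                             secondary_dxpl_id, cache_ptr,
+ *                                             my_class_ptr, obj_addr,
+ *                                             udata1, udata2);
+ *        if ( obj_ptr == NULL )
+ *            ...handle the error...
+ *
+ *        ...read or modify *obj_ptr, marking it dirty if modified...
+ *
+ *        if ( H5C_unprotect(f, primary_dxpl_id, secondary_dxpl_id,
+ *                           cache_ptr, my_class_ptr, obj_addr,
+ *                           (void *)obj_ptr, FALSE) < 0 )
+ *            ...handle the error...
+ */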
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_stats
+ *
+ * Purpose: Prints statistics about the cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C_stats(H5C_t * cache_ptr,
+ const char * cache_name,
+ hbool_t display_detailed_stats)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+#if H5C_COLLECT_CACHE_STATS
+ int i;
+ int64_t total_hits = 0;
+ int64_t total_misses = 0;
+ int64_t total_insertions = 0;
+ int64_t total_clears = 0;
+ int64_t total_flushes = 0;
+ int64_t total_evictions = 0;
+ int64_t total_renames = 0;
+ int32_t aggregate_max_accesses = 0;
+ int32_t aggregate_min_accesses = 1000000;
+ int32_t aggregate_max_clears = 0;
+ int32_t aggregate_max_flushes = 0;
+ double hit_rate;
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ FUNC_ENTER_NOAPI(H5C_stats, FAIL)
+
+ /* This would normally be an assert, but we need to use an HGOTO_ERROR
+ * call to shut up the compiler.
+ */
+ if ( ( ! cache_ptr ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( !cache_name ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or cache_name")
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+
+ for ( i = 0; i <= cache_ptr->max_type_id; i++ ) {
+
+ total_hits += cache_ptr->hits[i];
+ total_misses += cache_ptr->misses[i];
+ total_insertions += cache_ptr->insertions[i];
+ total_clears += cache_ptr->clears[i];
+ total_flushes += cache_ptr->flushes[i];
+ total_evictions += cache_ptr->evictions[i];
+ total_renames += cache_ptr->renames[i];
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+ if ( aggregate_max_accesses < cache_ptr->max_accesses[i] )
+ aggregate_max_accesses = cache_ptr->max_accesses[i];
+ if ( aggregate_min_accesses > aggregate_max_accesses )
+ aggregate_min_accesses = aggregate_max_accesses;
+ if ( aggregate_min_accesses > cache_ptr->min_accesses[i] )
+ aggregate_min_accesses = cache_ptr->min_accesses[i];
+ if ( aggregate_max_clears < cache_ptr->max_clears[i] )
+ aggregate_max_clears = cache_ptr->max_clears[i];
+ if ( aggregate_max_flushes < cache_ptr->max_flushes[i] )
+ aggregate_max_flushes = cache_ptr->max_flushes[i];
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+ }
+
+ if ( ( total_hits > 0 ) || ( total_misses > 0 ) ) {
+
+ hit_rate = 100.0 * ((double)(total_hits)) /
+ ((double)(total_hits + total_misses));
+ } else {
+ hit_rate = 0.0;
+ }
+
+ HDfprintf(stdout, "\nH5C: cache statistics for %s\n",
+ cache_name);
+
+ HDfprintf(stdout, "\n");
+
+ HDfprintf(stdout,
+ " current (max) index size / length = %ld (%ld) / %ld (%ld)\n",
+ (long)(cache_ptr->index_size),
+ (long)(cache_ptr->max_index_size),
+ (long)(cache_ptr->index_len),
+ (long)(cache_ptr->max_index_len));
+
+ HDfprintf(stdout,
+ " current (max) PL size / length = %ld (%ld) / %ld (%ld)\n",
+ (long)(cache_ptr->pl_size),
+ (long)(cache_ptr->max_pl_size),
+ (long)(cache_ptr->pl_len),
+ (long)(cache_ptr->max_pl_len));
+
+ HDfprintf(stdout,
+ " current LRU list size / length = %ld / %ld\n",
+ (long)(cache_ptr->LRU_list_size),
+ (long)(cache_ptr->LRU_list_len));
+
+ HDfprintf(stdout,
+ " current clean LRU size / length = %ld / %ld\n",
+ (long)(cache_ptr->cLRU_list_size),
+ (long)(cache_ptr->cLRU_list_len));
+
+ HDfprintf(stdout,
+ " current dirty LRU size / length = %ld / %ld\n",
+ (long)(cache_ptr->dLRU_list_size),
+ (long)(cache_ptr->dLRU_list_len));
+
+ HDfprintf(stdout,
+ " Total hits / misses / hit_rate = %ld / %ld / %f\n",
+ (long)total_hits,
+ (long)total_misses,
+ hit_rate);
+
+ HDfprintf(stdout,
+ " Total clears / flushes / evictions = %ld / %ld / %ld\n",
+ (long)total_clears,
+ (long)total_flushes,
+ (long)total_evictions);
+
+ HDfprintf(stdout, " Total insertions / renames = %ld / %ld\n",
+ (long)total_insertions,
+ (long)total_renames);
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+
+ HDfprintf(stdout, " aggregate max / min accesses = %d / %d\n",
+ (int)aggregate_max_accesses,
+ (int)aggregate_min_accesses);
+
+ HDfprintf(stdout, " aggregate max_clears / max_flushes = %d / %d\n",
+ (int)aggregate_max_clears,
+ (int)aggregate_max_flushes);
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+ if ( display_detailed_stats )
+ {
+
+ for ( i = 0; i <= cache_ptr->max_type_id; i++ ) {
+
+ HDfprintf(stdout, "\n");
+
+ HDfprintf(stdout, " Stats on %s:\n",
+ (*(cache_ptr->type_name_table_ptr))[i]);
+
+ if ( ( cache_ptr->hits[i] > 0 ) || ( cache_ptr->misses[i] > 0 ) ) {
+
+ hit_rate = 100.0 * ((double)(cache_ptr->hits[i])) /
+ ((double)(cache_ptr->hits[i] + cache_ptr->misses[i]));
+ } else {
+ hit_rate = 0.0;
+ }
+
+ HDfprintf(stdout,
+ " hits / misses / hit_rate = %ld / %ld / %f\n",
+ (long)(cache_ptr->hits[i]),
+ (long)(cache_ptr->misses[i]),
+ hit_rate);
+
+ HDfprintf(stdout,
+ " clears / flushes / evictions = %ld / %ld / %ld\n",
+ (long)(cache_ptr->clears[i]),
+ (long)(cache_ptr->flushes[i]),
+ (long)(cache_ptr->evictions[i]));
+
+ HDfprintf(stdout,
+ " insertions / renames = %ld / %ld\n",
+ (long)(cache_ptr->insertions[i]),
+ (long)(cache_ptr->renames[i]));
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+
+ HDfprintf(stdout,
+ " entry max / min accesses = %d / %d\n",
+ cache_ptr->max_accesses[i],
+ cache_ptr->min_accesses[i]);
+
+ HDfprintf(stdout,
+ " entry max_clears / max_flushes = %d / %d\n",
+ cache_ptr->max_clears[i],
+ cache_ptr->max_flushes[i]);
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+ }
+ }
+
+ HDfprintf(stdout, "\n");
+
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_stats() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_stats__reset
+ *
+ * Purpose: Reset the stats fields to their initial values.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer, 4/28/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+H5C_stats__reset(H5C_t * cache_ptr)
+{
+ int i;
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+#if H5C_COLLECT_CACHE_STATS
+ for ( i = 0; i <= cache_ptr->max_type_id; i++ )
+ {
+ cache_ptr->hits[i] = 0;
+ cache_ptr->misses[i] = 0;
+ cache_ptr->insertions[i] = 0;
+ cache_ptr->clears[i] = 0;
+ cache_ptr->flushes[i] = 0;
+ cache_ptr->evictions[i] = 0;
+ cache_ptr->renames[i] = 0;
+ }
+
+ cache_ptr->max_index_len = 0;
+ cache_ptr->max_index_size = (size_t)0;
+
+ cache_ptr->max_pl_len = 0;
+ cache_ptr->max_pl_size = (size_t)0;
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+
+ for ( i = 0; i <= cache_ptr->max_type_id; i++ )
+ {
+ cache_ptr->max_accesses[i] = 0;
+ cache_ptr->min_accesses[i] = 1000000;
+ cache_ptr->max_clears[i] = 0;
+ cache_ptr->max_flushes[i] = 0;
+ }
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ return;
+
+} /* H5C_stats__reset() */
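+
+/* Illustrative sketch -- not part of the library. A test or benchmark can
+ * bracket a workload with a stats reset and a stats dump. This is only
+ * meaningful when H5C_COLLECT_CACHE_STATS is defined to 1:
+ *
+ *        H5C_stats__reset(cache_ptr);
+ *
+ *        ...run the workload of interest...
+ *
+ *        if ( H5C_stats(cache_ptr, "test cache", TRUE) < 0 )
+ *            ...handle the error...
+ */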
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_set_skip_flags
+ *
+ * Purpose: Set the values of the skip sanity check flags.
+ *
+ * This function and the skip sanity check flags were created
+ * for the convenience of the test bed. However, it is
+ * possible that the flags may have other uses.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/11/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+herr_t
+H5C_set_skip_flags(H5C_t * cache_ptr,
+ hbool_t skip_file_checks,
+ hbool_t skip_dxpl_id_checks)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_set_skip_flags, FAIL)
+
+ /* This would normally be an assert, but we need to use an HGOTO_ERROR
+ * call to shut up the compiler.
+ */
+ if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+ }
+
+ cache_ptr->skip_file_checks = skip_file_checks;
+ cache_ptr->skip_dxpl_id_checks = skip_dxpl_id_checks;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_set_skip_flags() */
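+
+/* Illustrative sketch -- not part of the library. The test bed typically
+ * drives the cache without a real file or dxpl ids, so it disables the
+ * corresponding sanity checks up front (cache_ptr comes from test setup):
+ *
+ *        if ( H5C_set_skip_flags(cache_ptr, TRUE, TRUE) < 0 )
+ *            ...abort the test...
+ */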
+
+
+/*************************************************************************/
+/**************************** Private Functions: *************************/
+/*************************************************************************/
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_flush_single_entry
+ *
+ * Purpose: Flush or clear (and evict if requested) the cache entry
+ * with the specified address and type. If the type is NULL,
+ * any unprotected entry at the specified address will be
+ * flushed (and possibly evicted).
+ *
+ * Attempts to flush a protected entry will result in an
+ * error.
+ *
+ * *first_flush_ptr should be true if only one
+ * flush is contemplated before the next load, or if this
+ * is the first of a sequence of flushes that will be
+ * completed before the next load. *first_flush_ptr is set
+ * to false if a flush actually takes place, and should be
+ * left false until the end of the sequence.
+ *
+ * The primary_dxpl_id is used if *first_flush_ptr is TRUE
+ * on entry, and a flush actually takes place. The
+ * secondary_dxpl_id is used in any subsequent flush where
+ * *first_flush_ptr is FALSE on entry.
+ *
+ * If the H5F_FLUSH_CLEAR_ONLY flag is set, the entry will
+ * be cleared and not flushed -- in which case *first_flush_ptr,
+ * primary_dxpl_id, and secondary_dxpl_id are all irrelevant,
+ * and the call can't be part of a sequence of flushes.
+ *
+ * If the caller knows the address of the TBBT node at
+ * which the target entry resides, it can avoid a lookup
+ * by supplying that address in the tgt_node_ptr parameter.
+ * If this parameter is NULL, the function will do a TBBT
+ * search for the entry instead.
+ *
+ * The function silently does nothing if there is no entry
+ * at the supplied address, or if the entry found has the
+ * wrong type.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ * Programmer: John Mainzer, 5/5/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_flush_single_entry(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type_ptr,
+ haddr_t addr,
+ unsigned flags,
+ H5TB_NODE * tgt_node_ptr,
+ hbool_t * first_flush_ptr,
+ hbool_t remove_entry_from_tree_on_destroy)
+{
+ hbool_t destroy = ( (flags & H5F_FLUSH_INVALIDATE) != 0 );
+ hbool_t clear_only = ( (flags & H5F_FLUSH_CLEAR_ONLY) != 0);
+ herr_t ret_value = SUCCEED; /* Return value */
+ herr_t status;
+ H5TB_NODE * node_ptr;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t search_target;
+
+ FUNC_ENTER_NOAPI(H5C_flush_single_entry, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->skip_file_checks || f );
+ HDassert( H5F_addr_defined(addr) );
+ HDassert( first_flush_ptr );
+
+ /* If tgt_node_ptr is NULL, look up the target entry in the tree.
+ * If it doesn't exist, we are done.
+ */
+
+ if ( tgt_node_ptr == NULL ) {
+
+ search_target.addr = addr;
+ node_ptr = H5TB_dfind(cache_ptr->index_tree_ptr,
+ (void *)&search_target,
+ NULL);
+
+ } else {
+
+ node_ptr = tgt_node_ptr;
+ }
+
+ if ( node_ptr != NULL ) {
+
+ entry_ptr = (H5C_cache_entry_t *)(node_ptr->data);
+ HDassert( entry_ptr != NULL );
+ HDassert( entry_ptr->addr == addr );
+ HDassert( node_ptr->data == node_ptr->key );
+ }
+
+ if ( ( entry_ptr != NULL ) && ( entry_ptr->protected ) )
+ {
+ /* Attempt to flush a protected entry -- scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, \
+ "Attempt to flush a protected entry.")
+ }
+
+ if ( ( entry_ptr != NULL ) &&
+ ( ( type_ptr == NULL ) || ( type_ptr->id == entry_ptr->type->id ) ) )
+ {
+ /* we have work to do */
+
+#ifdef H5_HAVE_PARALLEL
+#ifndef NDEBUG
+
+ /* If MPI based VFD is used, do special parallel I/O sanity checks.
+ * Note that we only do these sanity checks when the clear_only flag
+ * is not set, and the entry to be flushed is dirty. Don't bother
+ * otherwise as no file I/O can result.
+ *
+ * There are also cases (testing for instance) where it is convenient
+ * to pass in dummy dxpl_ids. Since we don't use the dxpl_ids directly,
+ * this isn't a problem -- but we do have to turn off sanity checks
+ * involving them. We use cache_ptr->skip_dxpl_id_checks to do this.
+ */
+ if ( ( ! cache_ptr->skip_dxpl_id_checks ) &&
+ ( ! clear_only ) &&
+ ( entry_ptr->dirty ) &&
+ ( IS_H5FD_MPI(f) ) ) {
+
+ H5P_genplist_t *dxpl; /* Dataset transfer property list */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O xfer mode property value */
+
+ /* Get the dataset transfer property list */
+ if ( NULL == (dxpl = H5I_object(primary_dxpl_id)) ) {
+
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, \
+ "not a dataset creation property list")
+ }
+
+ /* Get the transfer mode property */
+ if( H5P_get(dxpl, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0 ) {
+
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, \
+ "can't retrieve xfer mode")
+ }
+
+ /* Sanity check transfer mode */
+ HDassert( xfer_mode == H5FD_MPIO_COLLECTIVE || IS_H5FD_FPHDF5(f) );
+ }
+
+#endif /* NDEBUG */
+#endif /* H5_HAVE_PARALLEL */
+ if ( clear_only ) {
+ H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+ } else {
+ H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+ }
+
+ if ( destroy ) {
+ H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
+ }
+
+ /* remove entry from tree if asked -- must do this now as the
+ * callback routines will free the entry if destroy is true.
+ */
+ if ( ( destroy ) && ( remove_entry_from_tree_on_destroy ) ) {
+ if ( H5C_remove_entry_from_tree(cache_ptr, entry_ptr, node_ptr)
+ < 0 ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't delete entry from tree.")
+ }
+ }
+
+ /* Update the replacement policy for the flush or eviction.
+ * Again, do this now so we don't have to reference freed
+ * memory in the destroy case.
+ */
+
+ if ( destroy ) { /* AKA eviction */
+ status = H5C_update_rp_for_eviction(cache_ptr, entry_ptr);
+ } else {
+ status = H5C_update_rp_for_flush(cache_ptr, entry_ptr);
+ }
+
+ if ( status < 0 ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't update replacement policy.")
+ }
+
+ /* Clear the dirty flag only, if requested */
+ if ( clear_only ) {
+ /* Call the callback routine to clear all dirty flags for object */
+ if ( (entry_ptr->type->clear)(f, entry_ptr, destroy) < 0 ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
+ }
+ } else {
+
+ /* Only block for all the processes on the first piece of metadata
+ */
+
+ if ( *first_flush_ptr && entry_ptr->dirty ) {
+ status = (entry_ptr->type->flush)(f, primary_dxpl_id, destroy,
+ entry_ptr->addr, entry_ptr);
+ *first_flush_ptr = FALSE;
+ } else {
+ status = (entry_ptr->type->flush)(f, secondary_dxpl_id,
+ destroy, entry_ptr->addr,
+ entry_ptr);
+ }
+
+ if ( status < 0 ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush entry")
+ }
+ }
+
+ if ( ! destroy ) {
+
+ HDassert( !(entry_ptr->dirty) );
+ }
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_flush_single_entry() */
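+
+/* Illustrative sketch of the first_flush protocol -- not part of the
+ * library. The first write of a flush sequence goes out under
+ * primary_dxpl_id; all later writes use secondary_dxpl_id. The caller
+ * passes a single flag through the whole sequence:
+ *
+ *        hbool_t first_flush = TRUE;
+ *
+ *        ...for each entry to be flushed...
+ *            if ( H5C_flush_single_entry(f, primary_dxpl_id,
+ *                                        secondary_dxpl_id, cache_ptr,
+ *                                        entry_ptr->type, entry_ptr->addr,
+ *                                        (unsigned)0, NULL, &first_flush,
+ *                                        FALSE) < 0 )
+ *                ...handle the error...
+ */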
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_insert_entry_in_tree
+ *
+ * Purpose: Insert the specified instance of H5C_cache_entry_t into the
+ * index tree in the specified instance of H5C_t. Update
+ * the associated length and size fields.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_insert_entry_in_tree(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5TB_NODE * node_ptr = NULL;
+
+ FUNC_ENTER_NOAPI(H5C_insert_entry_in_tree, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( entry_ptr );
+ HDassert( entry_ptr->size > 0 );
+ HDassert( H5F_addr_defined(entry_ptr->addr) );
+
+ /* Don't bother to check if the entry is already in the tree -- if it
+ * is, H5TB_dins() will fail.
+ */
+ node_ptr = H5TB_dins(cache_ptr->index_tree_ptr, (void *)entry_ptr, NULL);
+
+ if ( node_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't insert entry in tree")
+
+ }
+
+ cache_ptr->index_len++;
+ cache_ptr->index_size += entry_ptr->size;
+ HDassert( cache_ptr->index_len > 0 );
+ HDassert( cache_ptr->index_size > 0 );
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_insert_entry_in_tree */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_load_entry
+ *
+ * Purpose: Attempt to load the entry at the specified disk address
+ * and with the specified type into memory. If successful,
+ * return the in-memory address of the entry. Return NULL
+ * on failure.
+ *
+ * Note that this function simply loads the entry into
+ * core. It does not insert it into the cache.
+ *
+ * Return: Non-NULL on success / NULL on failure.
+ *
+ * Programmer: John Mainzer, 5/18/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void *
+H5C_load_entry(H5F_t * f,
+ hid_t dxpl_id,
+ const H5C_class_t * type,
+ haddr_t addr,
+ const void * udata1,
+ void * udata2,
+ hbool_t skip_file_checks)
+{
+ void * thing = NULL;
+ void * ret_value = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+
+ FUNC_ENTER_NOAPI(H5C_load_entry, NULL)
+
+ HDassert( skip_file_checks || f );
+ HDassert( type );
+ HDassert( type->load );
+ HDassert( type->size );
+ HDassert( H5F_addr_defined(addr) );
+
+ if ( NULL == (thing = (type->load)(f, dxpl_id, addr, udata1, udata2)) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "unable to load entry")
+
+ }
+
+ entry_ptr = (H5C_cache_entry_t *)thing;
+
+ HDassert( entry_ptr->dirty == FALSE );
+
+ entry_ptr->addr = addr;
+ entry_ptr->type = type;
+ entry_ptr->protected = FALSE;
+
+ if ( (type->size)(f, thing, &(entry_ptr->size)) < 0 ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, NULL, \
+ "Can't get size of thing")
+ }
+
+ HDassert( entry_ptr->size < H5C_MAX_ENTRY_SIZE );
+
+ entry_ptr->next = NULL;
+ entry_ptr->prev = NULL;
+ entry_ptr->aux_next = NULL;
+ entry_ptr->aux_prev = NULL;
+
+ H5C__RESET_CACHE_ENTRY_STATS(entry_ptr);
+
+ ret_value = thing;
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_load_entry() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_make_space_in_cache
+ *
+ * Purpose: Attempt to evict cache entries until the index_size
+ * is at least space_needed below max_cache_size.
+ *
+ * In passing, also attempt to bring cLRU_list_size to a
+ * value greater than min_clean_size.
+ *
+ * Depending on circumstances, both of these goals may
+ * be impossible, as in parallel mode, we must avoid generating
+ * a write as part of a read (to avoid deadlock in collective
+ * I/O), and in all cases, it is possible (though hopefully
+ * highly unlikely) that the protected list may exceed the
+ * maximum size of the cache.
+ *
+ * Thus the function simply does its best, returning success
+ * unless an error is encountered.
+ *
+ * The primary_dxpl_id and secondary_dxpl_id parameters
+ * specify the dxpl_ids used on the first write occasioned
+ * by the call (primary_dxpl_id), and on all subsequent
+ * writes (secondary_dxpl_id). This is useful in the metadata
+ * cache, but may not be needed elsewhere. If so, just use the
+ * same dxpl_id for both parameters.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/14/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_make_space_in_cache(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ size_t space_needed,
+ hbool_t write_permitted)
+{
+ hbool_t first_flush = TRUE;
+ herr_t ret_value = SUCCEED; /* Return value */
+ herr_t result;
+ int32_t entries_examined = 0;
+ int32_t initial_list_len;
+ H5C_cache_entry_t * entry_ptr;
+ H5C_cache_entry_t * prev_ptr;
+
+ FUNC_ENTER_NOAPI(H5C_make_space_in_cache, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ if ( write_permitted ) {
+
+ initial_list_len = cache_ptr->LRU_list_len;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ while ( ( (cache_ptr->index_size + space_needed)
+ >
+ cache_ptr->max_cache_size
+ )
+ &&
+ ( entries_examined <= (2 * initial_list_len) )
+ &&
+ ( entry_ptr != NULL )
+ )
+ {
+ HDassert( ! (entry_ptr->protected) );
+
+ prev_ptr = entry_ptr->prev;
+
+ if ( entry_ptr->dirty ) {
+
+ result = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ (unsigned)0,
+ NULL,
+ &first_flush,
+ FALSE);
+ } else {
+
+ result = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ H5F_FLUSH_INVALIDATE,
+ NULL,
+ &first_flush,
+ TRUE);
+ }
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush entry")
+ }
+
+ entry_ptr = prev_ptr;
+ }
+
+ initial_list_len = cache_ptr->dLRU_list_len;
+ entry_ptr = cache_ptr->dLRU_tail_ptr;
+
+ while ( ( cache_ptr->cLRU_list_size < cache_ptr->min_clean_size ) &&
+ ( entries_examined <= initial_list_len ) &&
+ ( entry_ptr != NULL )
+ )
+ {
+ HDassert( ! (entry_ptr->protected) );
+ HDassert( entry_ptr->dirty );
+
+ prev_ptr = entry_ptr->aux_prev;
+
+ result = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ (unsigned)0,
+ NULL,
+ &first_flush,
+ FALSE);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush entry")
+ }
+
+ entry_ptr = prev_ptr;
+ }
+ } else {
+
+ initial_list_len = cache_ptr->cLRU_list_len;
+ entry_ptr = cache_ptr->cLRU_tail_ptr;
+
+ while ( ( (cache_ptr->index_size + space_needed)
+ >
+ cache_ptr->max_cache_size
+ )
+ &&
+ ( entries_examined <= initial_list_len )
+ &&
+ ( entry_ptr != NULL )
+ )
+ {
+ HDassert( ! (entry_ptr->protected) );
+ HDassert( ! (entry_ptr->dirty) );
+
+ prev_ptr = entry_ptr->aux_prev;
+
+ result = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ entry_ptr->type,
+ entry_ptr->addr,
+ H5F_FLUSH_INVALIDATE,
+ NULL,
+ &first_flush,
+ TRUE);
+
+ if ( result < 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush entry")
+ }
+
+ entry_ptr = prev_ptr;
+ }
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_make_space_in_cache() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_remove_entry_from_tree
+ *
+ * Purpose: Remove the specified instance of H5C_cache_entry_t from the
+ * index tree in the specified instance of H5C_t. Update
+ * the associated length and size fields.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_remove_entry_from_tree(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr,
+ H5TB_NODE * node_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_remove_entry_from_tree, FAIL)
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( entry_ptr );
+ HDassert( node_ptr );
+ HDassert( node_ptr->data == entry_ptr );
+ HDassert( entry_ptr->size > 0 );
+
+ if ( entry_ptr->protected )
+ {
+ /* Attempt to delete a protected entry -- scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, \
+ "Attempt to delete protected entry")
+ }
+
+ if ( H5TB_rem(&(cache_ptr->index_tree_ptr->root), node_ptr, NULL)
+ != entry_ptr ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't delete entry from tree.")
+ } else {
+ HDassert( cache_ptr->index_len > 0 );
+
+ cache_ptr->index_len--;
+
+ HDassert( cache_ptr->index_size >= entry_ptr->size );
+
+ cache_ptr->index_size -= entry_ptr->size;
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_remove_entry_from_tree */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_update_rp_for_eviction
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * eviction of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_update_rp_for_eviction(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_update_rp_for_eviction, FAIL)
+
+#if H5C_DO_SANITY_CHECKS
+
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( entry_ptr == NULL ) ||
+ ( entry_ptr->protected ) ||
+ ( entry_ptr->size <= 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "General sanity checks failed")
+ }
+
+#endif /* H5C_DO_SANITY_CHECKS */
+
+
+ /* modified LRU specific code */
+
+ /* remove the entry from the LRU list. */
+
+ H5C__DLL_REMOVE(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ /* If the entry is clean when it is evicted, it should be on the
+ * clean LRU list; if it is dirty, it should be on the dirty LRU list.
+ * Remove it from the appropriate list according to the value of the
+ * dirty flag.
+ */
+
+ if ( entry_ptr->dirty ) {
+
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->dLRU_head_ptr, \
+ cache_ptr->dLRU_tail_ptr, \
+ cache_ptr->dLRU_list_len, \
+ cache_ptr->dLRU_list_size)
+ } else {
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, \
+ cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_update_rp_for_eviction() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_update_rp_for_flush
+ *
+ * Purpose: Update the replacement policy data structures for a flush
+ * of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/6/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_update_rp_for_flush(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_update_rp_for_flush, FAIL)
+
+#if H5C_DO_SANITY_CHECKS
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( entry_ptr == NULL ) ||
+ ( entry_ptr->protected ) ||
+ ( entry_ptr->size <= 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "General sanity checks failed")
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* modified LRU specific code */
+
+ /* remove the entry from the LRU list, and re-insert it at the head. */
+
+ H5C__DLL_REMOVE(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ H5C__DLL_PREPEND(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ /* since the entry is being flushed or cleared, one would think that it
+ * must be dirty -- but that need not be the case. Use the dirty flag
+ * to infer whether the entry is on the clean or dirty LRU list, and
+ * remove it. Then insert it at the head of the clean LRU list.
+ *
+ * The function presumes that a dirty entry will be either cleared or
+ * flushed shortly, so it is OK if we put a dirty entry on the clean
+ * LRU list.
+ */
+
+ if ( entry_ptr->dirty ) {
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->dLRU_head_ptr, \
+ cache_ptr->dLRU_tail_ptr, \
+ cache_ptr->dLRU_list_len, \
+ cache_ptr->dLRU_list_size)
+ } else {
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, \
+ cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+ }
+
+ H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_update_rp_for_flush() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_update_rp_for_insertion
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * insertion of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/17/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_update_rp_for_insertion(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_update_rp_for_insertion, FAIL)
+
+#if H5C_DO_SANITY_CHECKS
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( entry_ptr == NULL ) ||
+ ( entry_ptr->protected ) ||
+ ( entry_ptr->size <= 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "General sanity checks failed")
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* modified LRU specific code */
+
+ /* insert the entry at the head of the LRU list. */
+
+ H5C__DLL_PREPEND(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ /* insert the entry at the head of the clean or dirty LRU list as
+ * appropriate.
+ */
+
+ if ( entry_ptr->dirty ) {
+ H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->dLRU_head_ptr, \
+ cache_ptr->dLRU_tail_ptr, \
+ cache_ptr->dLRU_list_len, \
+ cache_ptr->dLRU_list_size)
+ } else {
+ H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, \
+ cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_update_rp_for_insertion() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_update_rp_for_protect
+ *
+ * Purpose: Update the replacement policy data structures for a
+ * protect of the specified cache entry.
+ *
+ * To do this, unlink the specified entry from any data
+ * structures used by the replacement policy, and add the
+ * entry to the protected list.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/17/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_update_rp_for_protect(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_update_rp_for_protect, FAIL)
+
+#if H5C_DO_SANITY_CHECKS
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( entry_ptr == NULL ) ||
+ ( entry_ptr->protected ) ||
+ ( entry_ptr->size <= 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "General sanity checks failed")
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* modified LRU specific code */
+
+ /* remove the entry from the LRU list. */
+
+ H5C__DLL_REMOVE(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ /* Similarly, remove the entry from the clean or dirty LRU list
+ * as appropriate.
+ */
+
+ if ( entry_ptr->dirty ) {
+
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->dLRU_head_ptr, \
+ cache_ptr->dLRU_tail_ptr, \
+ cache_ptr->dLRU_list_len, \
+ cache_ptr->dLRU_list_size)
+
+ } else {
+
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, \
+ cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+ }
+
+ /* End modified LRU specific code. */
+
+
+ /* Regardless of the replacement policy, now add the entry to the
+ * protected list.
+ */
+
+ H5C__DLL_APPEND(entry_ptr, cache_ptr->pl_head_ptr, cache_ptr->pl_tail_ptr, \
+ cache_ptr->pl_len, cache_ptr->pl_size)
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_update_rp_for_protect() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_update_rp_for_rename
+ *
+ * Purpose: Update the replacement policy data structures for a
+ * rename of the specified cache entry.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/17/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_update_rp_for_rename(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_update_rp_for_rename, FAIL)
+
+#if H5C_DO_SANITY_CHECKS
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( entry_ptr == NULL ) ||
+ ( entry_ptr->protected ) ||
+ ( entry_ptr->size <= 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "General sanity checks failed")
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* modified LRU specific code */
+
+ /* remove the entry from the LRU list, and re-insert it at the head. */
+
+ H5C__DLL_REMOVE(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ H5C__DLL_PREPEND(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ /* move the entry to the head of either the clean or dirty LRU list
+ * as appropriate.
+ */
+
+ if ( entry_ptr->dirty ) {
+
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->dLRU_head_ptr, \
+ cache_ptr->dLRU_tail_ptr, \
+ cache_ptr->dLRU_list_len, \
+ cache_ptr->dLRU_list_size)
+
+ H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->dLRU_head_ptr, \
+ cache_ptr->dLRU_tail_ptr, \
+ cache_ptr->dLRU_list_len, \
+ cache_ptr->dLRU_list_size)
+
+ } else {
+
+ H5C__AUX_DLL_REMOVE(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, \
+ cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+
+ H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, \
+ cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_update_rp_for_rename() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_update_rp_for_unprotect
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * unprotect of the specified cache entry.
+ *
+ * To do this, unlink the specified entry from the protected
+ * list, and re-insert it in the data structures used by the
+ * current replacement policy.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this function deals with that case unconditionally. If
+ * we ever support other replacement policies, the function
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/19/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+H5C_update_rp_for_unprotect(H5C_t * cache_ptr,
+ H5C_cache_entry_t * entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_update_rp_for_unprotect, FAIL)
+
+#if H5C_DO_SANITY_CHECKS
+ if ( ( cache_ptr == NULL ) ||
+ ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
+ ( entry_ptr == NULL ) ||
+ ( !(entry_ptr->protected) ) ||
+ ( entry_ptr->size <= 0 ) ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "General sanity checks failed")
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Regardless of the replacement policy, remove the entry from the
+ * protected list.
+ */
+ H5C__DLL_REMOVE(entry_ptr, cache_ptr->pl_head_ptr, \
+ cache_ptr->pl_tail_ptr, cache_ptr->pl_len, \
+ cache_ptr->pl_size)
+
+
+ /* modified LRU specific code */
+
+ /* insert the entry at the head of the LRU list. */
+
+ H5C__DLL_PREPEND(entry_ptr, cache_ptr->LRU_head_ptr, \
+ cache_ptr->LRU_tail_ptr, cache_ptr->LRU_list_len, \
+ cache_ptr->LRU_list_size)
+
+ /* Similarly, insert the entry at the head of either the clean or
+ * dirty LRU list as appropriate.
+ */
+
+ if ( entry_ptr->dirty ) {
+
+ H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->dLRU_head_ptr, \
+ cache_ptr->dLRU_tail_ptr, \
+ cache_ptr->dLRU_list_len, \
+ cache_ptr->dLRU_list_size)
+
+ } else {
+
+ H5C__AUX_DLL_PREPEND(entry_ptr, cache_ptr->cLRU_head_ptr, \
+ cache_ptr->cLRU_tail_ptr, \
+ cache_ptr->cLRU_list_len, \
+ cache_ptr->cLRU_list_size)
+ }
+
+ /* End modified LRU specific code. */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_update_rp_for_unprotect() */
+
+
+/*************************************************************************/
+/*************************************************************************/
+/*************************************************************************/
+/********************************** END **********************************/
+/*************************************************************************/
+/*************************************************************************/
+/*************************************************************************/
+
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
new file mode 100644
index 0000000..2a0d3db
--- /dev/null
+++ b/src/H5Cprivate.h
@@ -0,0 +1,374 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Cprivate.h
+ * 6/3/04
+ * John Mainzer
+ *
+ * Purpose: Constants and typedefs available to the rest of the
+ * library.
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef _H5Cprivate_H
+#define _H5Cprivate_H
+
+#include "H5Cpublic.h" /*public prototypes */
+
+/* Private headers needed by this header */
+#include "H5private.h" /* Generic Functions */
+#include "H5Fprivate.h" /* File access */
+
+#define H5C_DO_SANITY_CHECKS 0
+
+/* This sanity checking constant was picked out of the air. Increase
+ * or decrease it if appropriate.  Its purpose is to detect corrupt
+ * object sizes, so it probably doesn't matter if it is a bit big.
+ *
+ * JRM - 5/17/04
+ */
+#define H5C_MAX_ENTRY_SIZE ((size_t)(100 * 1024))
+
+/* H5C_COLLECT_CACHE_STATS controls overall collection of statistics
+ * on cache activity. In general, this #define should be set to 0.
+ */
+#define H5C_COLLECT_CACHE_STATS 0
+
+/* H5C_COLLECT_CACHE_ENTRY_STATS controls collection of statistics
+ * in individual cache entries.
+ *
+ * H5C_COLLECT_CACHE_ENTRY_STATS should only be defined to true if
+ * H5C_COLLECT_CACHE_STATS is also defined to true.
+ */
+#if H5C_COLLECT_CACHE_STATS
+
+#define H5C_COLLECT_CACHE_ENTRY_STATS 1
+
+#else
+
+#define H5C_COLLECT_CACHE_ENTRY_STATS 0
+
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+/*
+ * Class methods pertaining to caching. Each type of cached object will
+ * have a constant variable with permanent life-span that describes how
+ * to cache the object. That variable will be of type H5C_class_t and
+ * have the following required fields...
+ *
+ * LOAD: Loads an object from disk to memory. The function
+ * should allocate some data structure and return it.
+ *
+ * FLUSH: Writes some data structure back to disk. It would be
+ * wise for the data structure to include dirty flags to
+ * indicate whether it really needs to be written. This
+ * function is also responsible for freeing memory allocated
+ * by the LOAD method if the DEST argument is non-zero (by
+ * calling the DEST method).
+ *
+ * DEST: Just frees memory allocated by the LOAD method.
+ *
+ * CLEAR: Just marks object as non-dirty.
+ *
+ * SIZE: Report the size (on disk) of the specified cache object.
+ * Note that the space allocated on disk may not be contiguous.
+ */
+
+typedef void *(*H5C_load_func_t)(H5F_t *f,
+ hid_t dxpl_id,
+ haddr_t addr,
+ const void *udata1,
+ void *udata2);
+typedef herr_t (*H5C_flush_func_t)(H5F_t *f,
+ hid_t dxpl_id,
+ hbool_t dest,
+ haddr_t addr,
+ void *thing);
+typedef herr_t (*H5C_dest_func_t)(H5F_t *f,
+ void *thing);
+typedef herr_t (*H5C_clear_func_t)(H5F_t *f,
+ void *thing,
+ hbool_t dest);
+typedef herr_t (*H5C_size_func_t)(H5F_t *f,
+ void *thing,
+ size_t *size_ptr);
+
+typedef struct H5C_class_t {
+ int id;
+ H5C_load_func_t load;
+ H5C_flush_func_t flush;
+ H5C_dest_func_t dest;
+ H5C_clear_func_t clear;
+ H5C_size_func_t size;
+} H5C_class_t;
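
For illustration only -- the module, type id, and callback names below are hypothetical and not part of this patch -- a client module supplies functions matching the five typedefs above and then describes itself with a static H5C_class_t, in the same way the H5AC clients (H5Gnode.c, H5HG.c, H5HL.c, H5O.c) do later in this change:

#define H5C_FOO_ID 0                    /* hypothetical entry type id */

static void  *H5C_foo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
                           const void *udata1, void *udata2);
static herr_t H5C_foo_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
                            haddr_t addr, void *thing);
static herr_t H5C_foo_dest(H5F_t *f, void *thing);
static herr_t H5C_foo_clear(H5F_t *f, void *thing, hbool_t dest);
static herr_t H5C_foo_size(H5F_t *f, void *thing, size_t *size_ptr);

static const H5C_class_t H5C_FOO[1] = {{
    H5C_FOO_ID,                         /* id    */
    H5C_foo_load,                       /* load  */
    H5C_foo_flush,                      /* flush */
    H5C_foo_dest,                       /* dest  */
    H5C_foo_clear,                      /* clear */
    H5C_foo_size                        /* size  */
}};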
+
+
+/* Type definitions of callback functions used by the cache as a whole */
+
+typedef herr_t (*H5C_write_permitted_func_t)(H5F_t *f,
+ hid_t dxpl_id,
+ hbool_t * write_permitted_ptr);
+
+
+/* Default max cache size and min clean size are given here to make
+ * them generally accessible.
+ */
+
+#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(2 * 1024 * 1024))
+#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(1 * 1024 * 1024))
+
+
+/****************************************************************************
+ *
+ * structure H5C_cache_entry_t
+ *
+ * Instances of the H5C_cache_entry_t structure are used to store metadata
+ * cache entries in a threaded binary B-tree.  See H5TB.c for the
+ * particulars of the B-tree.
+ *
+ * In a typical application, this structure is the first field in a
+ * structure to be cached.  For historical reasons, the external module
+ * is responsible for managing the dirty field. All other fields are
+ * managed by the cache.
+ *
+ * Note that our current implementation of a threaded binary B-tree will
+ * occasionally change the node a particular datum is associated with.  Thus
+ * this structure does not have a back pointer to its B-tree node. If we
+ * ever modify the threaded binary B-tree code to fix this, a back pointer
+ * would save us a few tree traversals.
+ *
+ * The fields of this structure are discussed individually below:
+ *
+ * JRM - 4/26/04
+ *
+ * addr: Base address of the cache entry on disk.
+ *
+ * size: Length of the cache entry on disk. Note that unlike normal
+ * caches, the entries in this cache are of variable length.
+ * The entries should never overlap, and when we do writebacks,
+ * we will want to writeback adjacent entries where possible.
+ *
+ * type: Pointer to the instance of H5C_class_t containing pointers
+ * to the methods for cache entries of the current type. This
+ * field should be NULL when the instance of H5C_cache_entry_t
+ * is not in use.
+ *
+ * The name is not particularly descriptive, but is retained
+ * to avoid changes in existing code.
+ *
+ * dirty: Boolean flag indicating whether the contents of the cache
+ *		entry have been modified since the last time it was written
+ * to disk.
+ *
+ * NOTE: For historical reasons, this field is not maintained
+ * by the cache. Instead, the module using the cache
+ * sets this flag when it modifies the entry, and the
+ * flush and clear functions supplied by that module
+ *		reset the dirty flag when appropriate.
+ *
+ * This is a bit quirky, so we may want to change this
+ * someday. However it will require a change in the
+ * cache interface.
+ *
+ * protected: Boolean flag indicating whether this entry is protected
+ * (or locked, to use more conventional terms). When it is
+ * protected, the entry cannot be flushed or accessed until
+ * it is unprotected (or unlocked -- again to use more
+ * conventional terms).
+ *
+ * Note that protected entries are removed from the LRU lists
+ * and inserted on the protected list.
+ *
+ *
+ * Fields supporting replacement policies:
+ *
+ * The cache must have a replacement policy, and it will usually be
+ * necessary for this structure to contain fields supporting that policy.
+ *
+ * While there has been interest in several replacement policies for
+ * this cache, the initial development schedule is tight. Thus I have
+ * elected to support only a modified LRU policy for the first cut.
+ *
+ * When additional replacement policies are added, the fields in this
+ * section will be used in different ways or not at all. Thus the
+ * documentation of these fields is repeated for each replacement policy.
+ *
+ * Modified LRU:
+ *
+ * When operating in parallel mode, we must ensure that a read does not
+ * cause a write. If it does, the process will hang, as the write will
+ * be collective and the other processes will not know to participate.
+ *
+ * To deal with this issue, I have modified the usual LRU policy by adding
+ * clean and dirty LRU lists to the usual LRU list. When reading in
+ * parallel mode, we evict from the clean LRU list only. This implies
+ * that we must try to ensure that the clean LRU list is reasonably well
+ * stocked. See the comments on H5C_t in H5C.c for more details.
+ *
+ * Note that even if we start with a completely clean cache, a sequence
+ * of protects without unprotects can empty the clean LRU list. In this
+ * case, the cache must grow temporarily. At the next write, we will
+ * attempt to evict enough entries to get the cache down to its nominal
+ * maximum size.
+ *
+ * The use of the replacement policy fields under the Modified LRU policy
+ * is discussed below:
+ *
+ * next: Next pointer in either the LRU or the protected list,
+ * depending on the current value of protected. If there
+ * is no next entry on the list, this field should be set
+ * to NULL.
+ *
+ * prev: Prev pointer in either the LRU or the protected list,
+ * depending on the current value of protected. If there
+ * is no previous entry on the list, this field should be
+ * set to NULL.
+ *
+ * aux_next: Next pointer on either the clean or dirty LRU lists.
+ * This entry should be NULL when protected is true. When
+ * protected is false, and dirty is true, it should point
+ * to the next item on the dirty LRU list. When protected
+ * is false, and dirty is false, it should point to the
+ * next item on the clean LRU list. In either case, when
+ * there is no next item, it should be NULL.
+ *
+ * aux_prev: Previous pointer on either the clean or dirty LRU lists.
+ * This entry should be NULL when protected is true. When
+ * protected is false, and dirty is true, it should point
+ * to the previous item on the dirty LRU list. When protected
+ * is false, and dirty is false, it should point to the
+ * previous item on the clean LRU list. In either case, when
+ * there is no previous item, it should be NULL.
+ *
+ *
+ * Cache entry stats collection fields:
+ *
+ * These fields should only be compiled in when both H5C_COLLECT_CACHE_STATS
+ * and H5C_COLLECT_CACHE_ENTRY_STATS are true. When present, they allow
+ * collection of statistics on individual cache entries.
+ *
+ * accesses: int32_t containing the number of times this cache entry has
+ * been referenced in its lifetime.
+ *
+ * clears:	int32_t containing the number of times this cache entry has
+ *		been cleared in its lifetime.
+ *
+ * flushes:	int32_t containing the number of times this cache entry has
+ *		been flushed to file in its lifetime.
+ *
+ ****************************************************************************/
+
+typedef struct H5C_cache_entry_t
+{
+ haddr_t addr;
+ size_t size;
+ const H5C_class_t * type;
+ hbool_t dirty;
+ hbool_t protected;
+
+ /* fields supporting replacement policies: */
+
+ struct H5C_cache_entry_t * next;
+ struct H5C_cache_entry_t * prev;
+ struct H5C_cache_entry_t * aux_next;
+ struct H5C_cache_entry_t * aux_prev;
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+
+ /* cache entry stats fields */
+
+ int32_t accesses;
+ int32_t clears;
+ int32_t flushes;
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+} H5C_cache_entry_t;
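
The test harness added in test/cache.c below follows the "first field" convention documented above. A minimal sketch of the idiom, with a made-up client type -- note that the client, not the cache, maintains the dirty flag, exactly as the heap code later in this patch does with heap->cache_info.dirty:

typedef struct foo_object_t {
    H5C_cache_entry_t cache_info;       /* must be the first field      */
    int               value;            /* hypothetical client data     */
} foo_object_t;

static void
foo_object_modify(foo_object_t *obj, int new_value)
{
    obj->value = new_value;
    obj->cache_info.dirty = TRUE;       /* client sets the dirty flag   */
}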
+
+
+/* Typedef for the main structure for the cache (defined in H5C.c) */
+
+typedef struct H5C_t H5C_t;
+
+/*
+ * Library prototypes.
+ */
+H5_DLL H5C_t * H5C_create(size_t max_cache_size,
+ size_t min_clean_size,
+ int max_type_id,
+ const char * (* type_name_table_ptr)[],
+ H5C_write_permitted_func_t check_write_permitted);
+
+H5_DLL herr_t H5C_dest(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr);
+
+H5_DLL herr_t H5C_dest_empty(H5C_t * cache_ptr);
+
+H5_DLL herr_t H5C_flush_cache(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ unsigned flags);
+
+H5_DLL herr_t H5C_insert_entry(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t addr,
+ void * thing);
+
+H5_DLL herr_t H5C_rename_entry(H5F_t * f,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t old_addr,
+ haddr_t new_addr);
+
+H5_DLL void * H5C_protect(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t addr,
+ const void * udata1,
+ void * udata2);
+
+H5_DLL herr_t H5C_unprotect(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ const H5C_class_t * type,
+ haddr_t addr,
+ void * thing,
+ hbool_t deleted);
+
+H5_DLL herr_t H5C_stats(H5C_t * cache_ptr,
+ const char * cache_name,
+ hbool_t display_detailed_stats);
+
+H5_DLL void H5C_stats__reset(H5C_t * cache_ptr);
+
+H5_DLL herr_t H5C_set_skip_flags(H5C_t * cache_ptr,
+ hbool_t skip_file_checks,
+ hbool_t skip_dxpl_id_checks);
+
+#endif /* !_H5Cprivate_H */
+
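Pulling the prototypes together, use of the cache by an upper layer (H5AC.c in this patch) looks roughly like the sketch below. The helper names and the H5C_FOO class are the hypothetical ones from the earlier sketch, and the interpretation of max_type_id and the type name table is an assumption based on the parameter names alone:

static const char *foo_type_names[1] = { "foo entries" };

static herr_t
foo_check_write_permitted(H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
                          hbool_t *write_permitted_ptr)
{
    *write_permitted_ptr = TRUE;        /* serial case: writes always allowed */
    return SUCCEED;
}

static herr_t
foo_cache_demo(H5F_t *f, hid_t dxpl_id, haddr_t addr)
{
    H5C_t *cache_ptr;
    void  *thing;

    cache_ptr = H5C_create(H5C__DEFAULT_MAX_CACHE_SIZE,
                           H5C__DEFAULT_MIN_CLEAN_SIZE,
                           0,                   /* assumed: largest valid type id */
                           &foo_type_names,
                           foo_check_write_permitted);
    if (cache_ptr == NULL)
        return FAIL;

    /* protect (load and lock), modify, then unprotect the entry at addr */
    thing = H5C_protect(f, dxpl_id, dxpl_id, cache_ptr, H5C_FOO, addr,
                        NULL, NULL);
    if (thing == NULL)
        return FAIL;

    /* ... modify *thing and set its cache_info.dirty flag here ... */

    if (H5C_unprotect(f, dxpl_id, dxpl_id, cache_ptr, H5C_FOO,
                      addr, thing, FALSE) < 0)
        return FAIL;

    return H5C_dest(f, dxpl_id, dxpl_id, cache_ptr);
}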
diff --git a/src/H5Cpublic.h b/src/H5Cpublic.h
new file mode 100644
index 0000000..d4e82be
--- /dev/null
+++ b/src/H5Cpublic.h
@@ -0,0 +1,40 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created:		H5Cpublic.h
+ *			June 4, 2004
+ * John Mainzer
+ *
+ * Purpose: Public include file for cache functions.
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef _H5Cpublic_H
+#define _H5Cpublic_H
+
+/* Public headers needed by this file */
+#include "H5public.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _H5Cpublic_H */
diff --git a/src/H5E.c b/src/H5E.c
index 5128aa8..8ac0a86 100644
--- a/src/H5E.c
+++ b/src/H5E.c
@@ -120,6 +120,35 @@ static herr_t H5E_walk_cb(unsigned n, const H5E_error_t *err_desc, void *client
static herr_t H5E_get_auto(const H5E_t *estack, H5E_auto_t *func, void **client_data);
static herr_t H5E_set_auto(H5E_t *estack, H5E_auto_t func, void *client_data);
+
+/*-------------------------------------------------------------------------
+ * Function: H5E_init
+ *
+ * Purpose: Initialize the interface from some other layer.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, June 29, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5E_init(void)
+{
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5E_init, FAIL)
+ /* FUNC_ENTER() does all the work */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+}
+
/*--------------------------------------------------------------------------
* Function: H5E_init_interface
*
diff --git a/src/H5Edefin.h b/src/H5Edefin.h
index 2d70e0d..dfd9714 100644
--- a/src/H5Edefin.h
+++ b/src/H5Edefin.h
@@ -90,8 +90,7 @@ hid_t H5E_NOFILTER_g = FAIL; /* Requested filter is not available */
hid_t H5E_CALLBACK_g = FAIL; /* Callback failed */
hid_t H5E_CANAPPLY_g = FAIL; /* Error from filter 'can apply' callback */
hid_t H5E_SETLOCAL_g = FAIL; /* Error from filter 'set local' callback */
-hid_t H5E_NOENCODER_g = FAIL; /* Filter present but encoder not enabled */
-hid_t H5E_NODECODER_g = FAIL; /* Filter present but decoder not enabled */
+hid_t H5E_NOENCODER_g = FAIL; /* Filter present but encoding disabled */
/* Datatype conversion errors */
hid_t H5E_CANTCONVERT_g = FAIL; /* Can't convert datatypes */
@@ -116,6 +115,7 @@ hid_t H5E_ALREADYEXISTS_g = FAIL; /* Object already exists */
hid_t H5E_CANTLOCK_g = FAIL; /* Unable to lock object */
hid_t H5E_CANTUNLOCK_g = FAIL; /* Unable to unlock object */
hid_t H5E_CANTGC_g = FAIL; /* Unable to garbage collect */
+hid_t H5E_CANTGETSIZE_g = FAIL; /* Unable to compute size */
/* Generic low-level file I/O errors */
hid_t H5E_SEEKERROR_g = FAIL; /* Seek failed */
@@ -151,6 +151,11 @@ hid_t H5E_CANTSERIALIZE_g = FAIL; /* Unable to serialize data from cache *
hid_t H5E_CANTLOAD_g = FAIL; /* Unable to load metadata into cache */
hid_t H5E_PROTECT_g = FAIL; /* Protected metadata error */
hid_t H5E_NOTCACHED_g = FAIL; /* Metadata not currently cached */
+hid_t H5E_SYSTEM_g = FAIL; /* Internal error detected */
+hid_t H5E_CANTINS_g = FAIL; /* Unable to insert metadata into cache */
+hid_t H5E_CANTRENAME_g = FAIL; /* Unable to rename metadata */
+hid_t H5E_CANTPROTECT_g = FAIL; /* Unable to protect metadata */
+hid_t H5E_CANTUNPROTECT_g = FAIL; /* Unable to unprotect metadata */
/* Group related errors */
hid_t H5E_CANTOPENOBJ_g = FAIL; /* Can't open object */
diff --git a/src/H5Einit.h b/src/H5Einit.h
index 92b7158..400f82c 100644
--- a/src/H5Einit.h
+++ b/src/H5Einit.h
@@ -321,15 +321,10 @@ if((msg = H5E_create_msg(cls, H5E_MINOR, "Error from filter 'set local' callback
if((H5E_SETLOCAL_g = H5I_register(H5I_ERROR_MSG, msg))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
assert(H5E_NOENCODER_g==(-1));
-if((msg = H5E_create_msg(cls, H5E_MINOR, "Error from filter 'no encoder' callback"))==NULL)
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Filter present but encoding disabled"))==NULL)
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
if((H5E_NOENCODER_g = H5I_register(H5I_ERROR_MSG, msg))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
-assert(H5E_NODECODER_g==(-1));
-if((msg = H5E_create_msg(cls, H5E_MINOR, "Error from filter 'no decoder' callback"))==NULL)
- HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
-if((H5E_NODECODER_g = H5I_register(H5I_ERROR_MSG, msg))<0)
- HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
/* Datatype conversion errors */
assert(H5E_CANTCONVERT_g==(-1));
@@ -418,6 +413,11 @@ if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to garbage collect"))==NULL)
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
if((H5E_CANTGC_g = H5I_register(H5I_ERROR_MSG, msg))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
+assert(H5E_CANTGETSIZE_g==(-1));
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to compute size"))==NULL)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
+if((H5E_CANTGETSIZE_g = H5I_register(H5I_ERROR_MSG, msg))<0)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
/* Generic low-level file I/O errors */
assert(H5E_SEEKERROR_g==(-1));
@@ -561,6 +561,31 @@ if((msg = H5E_create_msg(cls, H5E_MINOR, "Metadata not currently cached"))==NULL
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
if((H5E_NOTCACHED_g = H5I_register(H5I_ERROR_MSG, msg))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
+assert(H5E_SYSTEM_g==(-1));
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Internal error detected"))==NULL)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
+if((H5E_SYSTEM_g = H5I_register(H5I_ERROR_MSG, msg))<0)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
+assert(H5E_CANTINS_g==(-1));
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to insert metadata into cache"))==NULL)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
+if((H5E_CANTINS_g = H5I_register(H5I_ERROR_MSG, msg))<0)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
+assert(H5E_CANTRENAME_g==(-1));
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to rename metadata"))==NULL)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
+if((H5E_CANTRENAME_g = H5I_register(H5I_ERROR_MSG, msg))<0)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
+assert(H5E_CANTPROTECT_g==(-1));
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to protect metadata"))==NULL)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
+if((H5E_CANTPROTECT_g = H5I_register(H5I_ERROR_MSG, msg))<0)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
+assert(H5E_CANTUNPROTECT_g==(-1));
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to unprotect metadata"))==NULL)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
+if((H5E_CANTUNPROTECT_g = H5I_register(H5I_ERROR_MSG, msg))<0)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
/* Group related errors */
assert(H5E_CANTOPENOBJ_g==(-1));
diff --git a/src/H5Eprivate.h b/src/H5Eprivate.h
index 6f858f9..b874157 100644
--- a/src/H5Eprivate.h
+++ b/src/H5Eprivate.h
@@ -108,6 +108,7 @@ typedef struct H5E_print_t {
#define HGOTO_DONE(ret_val) {ret_value = ret_val; goto done;}
/* Library-private functions defined in H5E package */
+H5_DLL herr_t H5E_init(void);
H5_DLL herr_t H5E_push(H5E_t *estack, const char *file, const char *func, unsigned line,
hid_t cls_id, hid_t maj_id, hid_t min_id, const char *desc);
H5_DLL herr_t H5E_clear(H5E_t *estack);
diff --git a/src/H5Epubgen.h b/src/H5Epubgen.h
index 958e91c..a0938ae 100644
--- a/src/H5Epubgen.h
+++ b/src/H5Epubgen.h
@@ -147,14 +147,12 @@ H5_DLLVAR hid_t H5E_CANTALLOC_g; /* Can't allocate from file */
#define H5E_CALLBACK (H5OPEN H5E_CALLBACK_g)
#define H5E_CANAPPLY (H5OPEN H5E_CANAPPLY_g)
#define H5E_SETLOCAL (H5OPEN H5E_SETLOCAL_g)
-#define H5E_NOENCODER (H5OPEN H5E_NOENCODER_g)
-#define H5E_NODECODER (H5OPEN H5E_NODECODER_g)
+#define H5E_NOENCODER (H5OPEN H5E_NOENCODER_g)
H5_DLLVAR hid_t H5E_NOFILTER_g; /* Requested filter is not available */
H5_DLLVAR hid_t H5E_CALLBACK_g; /* Callback failed */
H5_DLLVAR hid_t H5E_CANAPPLY_g; /* Error from filter 'can apply' callback */
H5_DLLVAR hid_t H5E_SETLOCAL_g; /* Error from filter 'set local' callback */
-H5_DLLVAR hid_t H5E_NOENCODER_g; /* Filter present, but encoding disabled */
-H5_DLLVAR hid_t H5E_NODECODER_g; /* Filter present, but decoding disabled */
+H5_DLLVAR hid_t H5E_NOENCODER_g; /* Filter present but encoding disabled */
/* Datatype conversion errors */
#define H5E_CANTCONVERT (H5OPEN H5E_CANTCONVERT_g)
@@ -188,6 +186,7 @@ H5_DLLVAR hid_t H5E_CANTDELETE_g; /* Can't delete message */
#define H5E_CANTLOCK (H5OPEN H5E_CANTLOCK_g)
#define H5E_CANTUNLOCK (H5OPEN H5E_CANTUNLOCK_g)
#define H5E_CANTGC (H5OPEN H5E_CANTGC_g)
+#define H5E_CANTGETSIZE (H5OPEN H5E_CANTGETSIZE_g)
H5_DLLVAR hid_t H5E_NOSPACE_g; /* No space available for allocation */
H5_DLLVAR hid_t H5E_CANTCOPY_g; /* Unable to copy object */
H5_DLLVAR hid_t H5E_CANTFREE_g; /* Unable to free object */
@@ -195,6 +194,7 @@ H5_DLLVAR hid_t H5E_ALREADYEXISTS_g;/* Object already exists */
H5_DLLVAR hid_t H5E_CANTLOCK_g; /* Unable to lock object */
H5_DLLVAR hid_t H5E_CANTUNLOCK_g; /* Unable to unlock object */
H5_DLLVAR hid_t H5E_CANTGC_g; /* Unable to garbage collect */
+H5_DLLVAR hid_t H5E_CANTGETSIZE_g; /* Unable to compute size */
/* Generic low-level file I/O errors */
#define H5E_SEEKERROR (H5OPEN H5E_SEEKERROR_g)
@@ -252,11 +252,21 @@ H5_DLLVAR hid_t H5E_MOUNT_g; /* File mount error */
#define H5E_CANTLOAD (H5OPEN H5E_CANTLOAD_g)
#define H5E_PROTECT (H5OPEN H5E_PROTECT_g)
#define H5E_NOTCACHED (H5OPEN H5E_NOTCACHED_g)
+#define H5E_SYSTEM (H5OPEN H5E_SYSTEM_g)
+#define H5E_CANTINS (H5OPEN H5E_CANTINS_g)
+#define H5E_CANTRENAME (H5OPEN H5E_CANTRENAME_g)
+#define H5E_CANTPROTECT (H5OPEN H5E_CANTPROTECT_g)
+#define H5E_CANTUNPROTECT (H5OPEN H5E_CANTUNPROTECT_g)
H5_DLLVAR hid_t H5E_CANTFLUSH_g; /* Unable to flush data from cache */
H5_DLLVAR hid_t H5E_CANTSERIALIZE_g;/* Unable to serialize data from cache */
H5_DLLVAR hid_t H5E_CANTLOAD_g; /* Unable to load metadata into cache */
H5_DLLVAR hid_t H5E_PROTECT_g; /* Protected metadata error */
H5_DLLVAR hid_t H5E_NOTCACHED_g; /* Metadata not currently cached */
+H5_DLLVAR hid_t H5E_SYSTEM_g; /* Internal error detected */
+H5_DLLVAR hid_t H5E_CANTINS_g; /* Unable to insert metadata into cache */
+H5_DLLVAR hid_t H5E_CANTRENAME_g; /* Unable to rename metadata */
+H5_DLLVAR hid_t H5E_CANTPROTECT_g; /* Unable to protect metadata */
+H5_DLLVAR hid_t H5E_CANTUNPROTECT_g;/* Unable to unprotect metadata */
/* Group related errors */
#define H5E_CANTOPENOBJ (H5OPEN H5E_CANTOPENOBJ_g)
diff --git a/src/H5Eterm.h b/src/H5Eterm.h
index 2acd75d..6be8cb6 100644
--- a/src/H5Eterm.h
+++ b/src/H5Eterm.h
@@ -91,9 +91,8 @@ H5E_CANTALLOC_g=
H5E_NOFILTER_g=
H5E_CALLBACK_g=
H5E_CANAPPLY_g=
-H5E_SETLOCAL_g=
+H5E_SETLOCAL_g=
H5E_NOENCODER_g=
-H5E_NODECODER_g=
/* Datatype conversion errors */
H5E_CANTCONVERT_g=
@@ -117,7 +116,8 @@ H5E_CANTFREE_g=
H5E_ALREADYEXISTS_g=
H5E_CANTLOCK_g=
H5E_CANTUNLOCK_g=
-H5E_CANTGC_g=
+H5E_CANTGC_g=
+H5E_CANTGETSIZE_g=
/* Generic low-level file I/O errors */
H5E_SEEKERROR_g=
@@ -152,7 +152,12 @@ H5E_CANTFLUSH_g=
H5E_CANTSERIALIZE_g=
H5E_CANTLOAD_g=
H5E_PROTECT_g=
-H5E_NOTCACHED_g=
+H5E_NOTCACHED_g=
+H5E_SYSTEM_g=
+H5E_CANTINS_g=
+H5E_CANTRENAME_g=
+H5E_CANTPROTECT_g=
+H5E_CANTUNPROTECT_g=
/* Group related errors */
H5E_CANTOPENOBJ_g=
diff --git a/src/H5F.c b/src/H5F.c
index 2dc7880..32b1ee9 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -1838,6 +1838,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, hid_t d
* We've just opened a fresh new file (or truncated one). We need
* to create & write the superblock.
*/
+
#ifdef H5_HAVE_FPHDF5
if (!H5FD_is_fphdf5_driver(lf) || H5FD_fphdf5_is_captain(lf)) {
#endif /* H5_HAVE_FPHDF5 */
@@ -1979,7 +1980,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, hid_t d
if(fc_degree!=H5F_CLOSE_DEFAULT && fc_degree != shared->fc_degree)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "file close degree doesn't match")
}
-
+
/* Success */
ret_value = file;
@@ -1987,6 +1988,7 @@ done:
if (!ret_value && file)
if(H5F_dest(file, dxpl_id)<0)
HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, NULL, "problems closing file")
+
FUNC_LEAVE_NOAPI(ret_value)
}
@@ -3272,9 +3274,9 @@ H5F_close(H5F_t *f)
/* Only flush at this point if the file will be closed */
assert(closing);
/* Dump debugging info */
-#ifdef H5AC_DEBUG
+#if H5C_COLLECT_CACHE_STATS
H5AC_stats(f);
-#endif /* H5AC_DEBUG */
+#endif /* H5C_COLLECT_CACHE_STATS */
/* Only try to flush the file if it was opened with write access */
if(f->intent&H5F_ACC_RDWR) {
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index 7fc436c..530020e 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -72,6 +72,7 @@ static herr_t H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t a
H5G_node_t *sym);
static herr_t H5G_node_dest(H5F_t *f, H5G_node_t *sym);
static herr_t H5G_node_clear(H5F_t *f, H5G_node_t *sym, hbool_t destroy);
+static herr_t H5G_compute_size(H5F_t *f, H5G_node_t *sym, size_t *size_ptr);
/* B-tree callbacks */
static size_t H5G_node_sizeof_rkey(H5F_t *f, const void *_udata);
@@ -107,6 +108,7 @@ const H5AC_class_t H5AC_SNODE[1] = {{
(H5AC_flush_func_t)H5G_node_flush,
(H5AC_dest_func_t)H5G_node_dest,
(H5AC_clear_func_t)H5G_node_clear,
+ (H5AC_size_func_t)H5G_compute_size,
}};
/* H5G inherits B-tree like properties from H5B */
@@ -619,6 +621,43 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5G_compute_size
+ *
+ * Purpose: Compute the size in bytes of the specified instance of
+ * H5G_node_t on disk, and return it in *size_ptr. On failure
+ *		the value of *size_ptr is undefined.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/13/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5G_compute_size(H5F_t *f, H5G_node_t UNUSED *sym, size_t *size_ptr)
+{
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5G_compute_size, FAIL);
+
+ /*
+ * Check arguments.
+ */
+ assert(f);
+ assert(size_ptr);
+
+ *size_ptr = H5G_node_size(f);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5G_compute_size() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5G_node_create
*
* Purpose: Creates a new empty symbol table node. This function is
diff --git a/src/H5HG.c b/src/H5HG.c
index 5fc4076..d72d617 100644
--- a/src/H5HG.c
+++ b/src/H5HG.c
@@ -120,7 +120,7 @@
/* Private typedefs */
/* PRIVATE PROTOTYPES */
-static H5HG_heap_t *H5HG_create(H5F_t *f, hid_t dxpl_id, size_t size);
+static haddr_t H5HG_create(H5F_t *f, hid_t dxpl_id, size_t size);
#ifdef NOT_YET
static void *H5HG_peek(H5F_t *f, hid_t dxpl_id, H5HG_t *hobj);
#endif /* NOT_YET */
@@ -132,6 +132,7 @@ static herr_t H5HG_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr,
H5HG_heap_t *heap);
static herr_t H5HG_dest(H5F_t *f, H5HG_heap_t *heap);
static herr_t H5HG_clear(H5F_t *f, H5HG_heap_t *heap, hbool_t destroy);
+static herr_t H5HG_compute_size(H5F_t *f, H5HG_heap_t *heap, size_t *size_ptr);
/*
* H5HG inherits cache-like properties from H5AC
@@ -142,6 +143,7 @@ const H5AC_class_t H5AC_GHEAP[1] = {{
(H5AC_flush_func_t)H5HG_flush,
(H5AC_dest_func_t)H5HG_dest,
(H5AC_clear_func_t)H5HG_clear,
+ (H5AC_size_func_t)H5HG_compute_size,
}};
/* Declare a free list to manage the H5HG_t struct */
@@ -173,18 +175,27 @@ H5FL_BLK_DEFINE_STATIC(heap_chunk);
*
* Modifications:
*
+ * John Mainzer 5/26/04
+ * Modified function to return the disk address of the new
+ * global heap collection, or HADDR_UNDEF on failure. This
+ * is necessary, as in some cases (i.e. flexible parallel)
+ *		H5AC_set() will immediately flush and destroy the in-memory
+ * version of the new collection. For the same reason, I
+ * moved the code which places the new collection on the cwfs
+ * list to just before the call to H5AC_set().
+ *
*-------------------------------------------------------------------------
*/
-static H5HG_heap_t *
+static haddr_t
H5HG_create (H5F_t *f, hid_t dxpl_id, size_t size)
{
H5HG_heap_t *heap = NULL;
- H5HG_heap_t *ret_value = NULL;
+ haddr_t ret_value = HADDR_UNDEF;
uint8_t *p = NULL;
haddr_t addr;
size_t n;
- FUNC_ENTER_NOAPI(H5HG_create, NULL);
+ FUNC_ENTER_NOAPI(H5HG_create, HADDR_UNDEF);
/* Check args */
assert (f);
@@ -194,19 +205,24 @@ H5HG_create (H5F_t *f, hid_t dxpl_id, size_t size)
/* Create it */
H5_CHECK_OVERFLOW(size,size_t,hsize_t);
- if (HADDR_UNDEF==(addr=H5MF_alloc(f, H5FD_MEM_GHEAP, dxpl_id, (hsize_t)size)))
- HGOTO_ERROR (H5E_HEAP, H5E_CANTINIT, NULL, "unable to allocate file space for global heap");
+ if ( HADDR_UNDEF==
+ (addr=H5MF_alloc(f, H5FD_MEM_GHEAP, dxpl_id, (hsize_t)size)))
+ HGOTO_ERROR (H5E_HEAP, H5E_CANTINIT, HADDR_UNDEF, \
+ "unable to allocate file space for global heap");
if (NULL==(heap = H5FL_MALLOC (H5HG_heap_t)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, HADDR_UNDEF, \
+ "memory allocation failed");
heap->addr = addr;
heap->size = size;
heap->cache_info.dirty = TRUE;
if (NULL==(heap->chunk = H5FL_BLK_MALLOC (heap_chunk,size)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, HADDR_UNDEF, \
+ "memory allocation failed");
heap->nalloc = H5HG_NOBJS (f, size);
heap->next_idx = 1; /* skip index 0, which is used for the free object */
if (NULL==(heap->obj = H5FL_SEQ_MALLOC (H5HG_obj_t,heap->nalloc)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, HADDR_UNDEF, \
+ "memory allocation failed");
/* Initialize the header */
HDmemcpy (heap->chunk, H5HG_MAGIC, H5HG_SIZEOF_MAGIC);
@@ -243,32 +259,42 @@ H5HG_create (H5F_t *f, hid_t dxpl_id, size_t size)
HDmemset (p, 0, (size_t)((heap->chunk+heap->size) - p));
#endif /* OLD_WAY */
- /* Add the heap to the cache */
- if (H5AC_set (f, dxpl_id, H5AC_GHEAP, addr, heap)<0)
- HGOTO_ERROR (H5E_HEAP, H5E_CANTINIT, NULL, "unable to cache global heap collection");
-
/* Add this heap to the beginning of the CWFS list */
if (NULL==f->shared->cwfs) {
f->shared->cwfs = H5MM_malloc (H5HG_NCWFS * sizeof(H5HG_heap_t*));
if (NULL==(f->shared->cwfs))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, HADDR_UNDEF, \
+ "memory allocation failed");
f->shared->cwfs[0] = heap;
f->shared->ncwfs = 1;
} else {
- HDmemmove (f->shared->cwfs+1, f->shared->cwfs, MIN (f->shared->ncwfs, H5HG_NCWFS-1)*sizeof(H5HG_heap_t*));
+ HDmemmove (f->shared->cwfs+1, f->shared->cwfs,
+ MIN (f->shared->ncwfs, H5HG_NCWFS-1)*sizeof(H5HG_heap_t*));
f->shared->cwfs[0] = heap;
f->shared->ncwfs = MIN (H5HG_NCWFS, f->shared->ncwfs+1);
}
- ret_value = heap;
+ /* Add the heap to the cache */
+ if (H5AC_set (f, dxpl_id, H5AC_GHEAP, addr, heap)<0)
+ HGOTO_ERROR (H5E_HEAP, H5E_CANTINIT, HADDR_UNDEF, \
+ "unable to cache global heap collection");
+
+ ret_value = addr;
done:
- if (!ret_value && heap) {
- if(H5HG_dest(f,heap)<0)
- HDONE_ERROR(H5E_HEAP, H5E_CANTFREE, NULL, "unable to destroy global heap collection");
+
+ if ( ( ! ( H5F_addr_defined(addr) ) ) && ( heap ) ) {
+
+ if ( H5HG_dest(f,heap) < 0 ) {
+
+ HDONE_ERROR(H5E_HEAP, H5E_CANTFREE, HADDR_UNDEF, \
+ "unable to destroy global heap collection");
+ }
}
+
FUNC_LEAVE_NOAPI(ret_value);
-}
+
+} /* H5HG_create() */
/*-------------------------------------------------------------------------
@@ -588,6 +614,41 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5HG_compute_size
+ *
+ * Purpose: Compute the size in bytes of the specified instance of
+ *		H5HG_heap_t on disk, and return it in *size_ptr.  On failure,
+ *		the value of *size_ptr is undefined.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/13/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HG_compute_size(H5F_t UNUSED *f, H5HG_heap_t *heap, size_t *size_ptr)
+{
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5HG_compute_size, FAIL);
+
+ /* Check arguments */
+ HDassert(heap);
+ HDassert(size_ptr);
+
+ *size_ptr = heap->size;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5HG_compute_size() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HG_alloc
*
* Purpose: Given a heap with enough free space, this function will split
@@ -816,6 +877,23 @@ done:
*
* Modifications:
*
+ * John Mainzer -- 5/24/04
+ * The function used to modify the heap without protecting
+ * the relevant collection first. I did a half assed job
+ *		the relevant collection first.  I did a half-assed job
+ * support multi-threading. At that point it will have to
+ * be done right.
+ *
+ * See in line comment of this date for more details.
+ *
+ * John Mainzer - 5/26/04
+ * Modified H5HG_create() to return the disk address of the
+ * new collection, instead of the address of its
+ * representation in core. This was necessary as in FP
+ * mode, the cache will immediately flush and destroy any
+ * entry inserted in it via H5AC_set(). I then modified
+ * this function to account for the change in H5HG_create().
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -824,6 +902,7 @@ H5HG_insert (H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*
size_t need; /*total space needed for object */
int cwfsno;
unsigned idx;
+ haddr_t addr = HADDR_UNDEF;
H5HG_heap_t *heap = NULL;
hbool_t found=0; /* Flag to indicate a heap with enough space was found */
herr_t ret_value=SUCCEED; /* Return value */
@@ -840,8 +919,36 @@ H5HG_insert (H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*
/* Find a large enough collection on the CWFS list */
need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(size);
+
+ /* Note that we don't have metadata cache locks on the entries in
+ * f->shared->cwfs.
+ *
+ * In the current situation, this doesn't matter, as we are single
+ * threaded, and as best I can tell, entries are added to and deleted
+ * from f->shared->cwfs as they are added to and deleted from the
+ * metadata cache.
+ *
+ * To be proper, we should either lock each entry in f->shared->cwfs
+ * as we examine it, or lock the whole array. However, at present
+ * I don't see the point as there will be significant overhead,
+ * and protecting and unprotecting all the collections in the global
+ * heap on a regular basis will skew the replacement policy.
+ *
+ * However, there is a bigger issue -- as best I can tell, we only look
+ * for free space in global heap chunks that are in cache. If we can't
+ * find any, we allocate a new chunk. This may be a problem in FP mode,
+ * as the metadata cache is disabled. Do we allocate a new heap
+ * collection for every entry in this case?
+ *
+ * Note that all this comes from a cursory read of the source. Don't
+ * take any of it as gospel.
+ * JRM - 5/24/04
+ */
+
for (cwfsno=0; cwfsno<f->shared->ncwfs; cwfsno++) {
if (f->shared->cwfs[cwfsno]->obj[0].size>=need) {
+
+ addr = f->shared->cwfs[cwfsno]->addr;
found=1;
break;
} /* end if */
@@ -856,6 +963,7 @@ H5HG_insert (H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*
if((f->shared->cwfs[cwfsno]->size+need)<=H5HG_MAXSIZE && H5MF_can_extend(f,H5FD_MEM_GHEAP,f->shared->cwfs[cwfsno]->addr,(hsize_t)f->shared->cwfs[cwfsno]->size,(hsize_t)need)) {
if(H5HG_extend(f,f->shared->cwfs[cwfsno],size)<0)
HGOTO_ERROR (H5E_HEAP, H5E_CANTINIT, FAIL, "unable to extend global heap collection");
+ addr = f->shared->cwfs[cwfsno]->addr;
found=1;
break;
} /* end if */
@@ -866,19 +974,23 @@ H5HG_insert (H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*
* If we didn't find any collection with enough free space then allocate a
* new collection large enough for the message plus the collection header.
*/
+
if (!found) {
- if (NULL==(heap=H5HG_create (f, dxpl_id, need+H5HG_SIZEOF_HDR (f))))
- HGOTO_ERROR (H5E_HEAP, H5E_CANTINIT, FAIL, "unable to allocate a global heap collection");
- assert (f->shared->ncwfs>0);
- assert (f->shared->cwfs[0]==heap);
- assert (f->shared->cwfs[0]->obj[0].size >= need);
+
+ addr = H5HG_create(f, dxpl_id, need+H5HG_SIZEOF_HDR (f));
+
+ if ( ! ( H5F_addr_defined(addr) ) ) {
+
+ HGOTO_ERROR (H5E_HEAP, H5E_CANTINIT, FAIL, \
+ "unable to allocate a global heap collection");
+ }
cwfsno = 0;
} /* end if */
else {
- /* Found a heap with enough space */
- heap = f->shared->cwfs[cwfsno];
- /* Move the collection forward in the CWFS list, if it's not already at the front */
+ /* Move the collection forward in the CWFS list, if it's not
+ * already at the front
+ */
if (cwfsno>0) {
H5HG_heap_t *tmp = f->shared->cwfs[cwfsno];
f->shared->cwfs[cwfsno] = f->shared->cwfs[cwfsno-1];
@@ -886,7 +998,15 @@ H5HG_insert (H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*
--cwfsno;
} /* end if */
} /* end else */
+
+ HDassert(H5F_addr_defined(addr));
+ if ( NULL == (heap = H5AC_protect(f, dxpl_id, H5AC_GHEAP,
+ addr, NULL, NULL, H5AC_WRITE)) ) {
+
+ HGOTO_ERROR (H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap");
+ }
+
/* Split the free space to make room for the new object */
idx = H5HG_alloc (f, heap, size);
assert (idx>0);
@@ -907,8 +1027,17 @@ H5HG_insert (H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*
hobj->idx = idx;
done:
+
+ if ( heap &&
+ H5AC_unprotect(f, dxpl_id, H5AC_GHEAP, heap->addr, heap, FALSE)
+ != SUCCEED ) {
+
+ HDONE_ERROR(H5E_HEAP, H5E_PROTECT, FAIL, "unable to unprotect heap.");
+ }
+
FUNC_LEAVE_NOAPI(ret_value);
-}
+
+} /* H5HG_insert() */
#ifdef NOT_YET
diff --git a/src/H5HL.c b/src/H5HL.c
index da9c68d..e45b617 100644
--- a/src/H5HL.c
+++ b/src/H5HL.c
@@ -72,6 +72,7 @@ static H5HL_t *H5HL_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udat
static herr_t H5HL_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, H5HL_t *heap);
static herr_t H5HL_dest(H5F_t *f, H5HL_t *heap);
static herr_t H5HL_clear(H5F_t *f, H5HL_t *heap, hbool_t destroy);
+static herr_t H5HL_compute_size(H5F_t *f, H5HL_t *heap, size_t *size_ptr);
/*
* H5HL inherits cache-like properties from H5AC
@@ -82,6 +83,7 @@ const H5AC_class_t H5AC_LHEAP[1] = {{
(H5AC_flush_func_t)H5HL_flush,
(H5AC_dest_func_t)H5HL_dest,
(H5AC_clear_func_t)H5HL_clear,
+ (H5AC_size_func_t)H5HL_compute_size,
}};
/* Declare a free list to manage the H5HL_free_t struct */
@@ -669,6 +671,42 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5HL_compute_size
+ *
+ * Purpose: Compute the size in bytes of the specified instance of
+ *		H5HL_t on disk, and return it in *size_ptr.  On failure,
+ *		the value of *size_ptr is undefined.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/13/04
+ *
+ * Modifications:
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HL_compute_size(H5F_t *f, H5HL_t *heap, size_t *size_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5HL_compute_size, FAIL);
+
+ /* check arguments */
+ HDassert(f);
+ HDassert(heap);
+ HDassert(size_ptr);
+
+ *size_ptr = H5HL_SIZEOF_HDR(f) + heap->disk_alloc;
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5HL_compute_size() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HL_read
*
* Purpose: Reads some object (or part of an object) from the heap
diff --git a/src/H5O.c b/src/H5O.c
index 1989077..ba8033f 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -91,6 +91,7 @@ static H5O_t *H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_udata
static herr_t H5O_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5O_t *oh);
static herr_t H5O_dest(H5F_t *f, H5O_t *oh);
static herr_t H5O_clear(H5F_t *f, H5O_t *oh, hbool_t destroy);
+static herr_t H5O_compute_size(H5F_t *f, H5O_t *oh, size_t *size_ptr);
/* H5O inherits cache-like properties from H5AC */
static const H5AC_class_t H5AC_OHDR[1] = {{
@@ -99,6 +100,7 @@ static const H5AC_class_t H5AC_OHDR[1] = {{
(H5AC_flush_func_t)H5O_flush,
(H5AC_dest_func_t)H5O_dest,
(H5AC_clear_func_t)H5O_clear,
+ (H5AC_size_func_t)H5O_compute_size,
}};
/* ID to type mapping */
@@ -893,6 +895,60 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5O_compute_size
+ *
+ * Purpose: Compute the size in bytes of the specified instance of
+ *		H5O_t on disk, and return it in *size_ptr.  On failure,
+ *		the value of *size_ptr is undefined.
+ *
+ * The value returned will probably be low unless the object
+ * has just been flushed, as we simply total up the size of
+ * the header with the sizes of the chunks. Thus any message
+ * that has been added since the last flush will not be
+ * reflected in the total.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/13/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O_compute_size(H5F_t *f, H5O_t *oh, size_t *size_ptr)
+{
+ unsigned u;
+ size_t size;
+ herr_t ret_value=SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5O_compute_size, FAIL);
+
+ /* check args */
+ HDassert(f);
+ HDassert(oh);
+ HDassert(size_ptr);
+
+ size = H5O_SIZEOF_HDR(f);
+
+ for (u = 0; u < oh->nchunks; u++)
+ {
+ size += oh->chunk[u].size;
+ }
+
+ HDassert(size >= H5O_SIZEOF_HDR(f));
+
+ *size_ptr = size;
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5O_compute_size() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5O_reset
*
* Purpose: Some message data structures have internal fields that
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index 99f3e30..33edafc 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -883,7 +883,8 @@ H5Pget_filter_by_id(hid_t plist_id, H5Z_filter_t id, unsigned int *flags/*out*/,
name);
/* Check args */
- if (cd_nelmts || cd_values) {
+ if (cd_nelmts || cd_values)
+{
if (cd_nelmts && *cd_nelmts>256)
/*
* It's likely that users forget to initialize this on input, so
diff --git a/src/H5err.txt b/src/H5err.txt
index 802cccf..8b42b3c 100644
--- a/src/H5err.txt
+++ b/src/H5err.txt
@@ -107,6 +107,7 @@ MINOR, RESOURCE, H5E_ALREADYEXISTS, Object already exists
MINOR, RESOURCE, H5E_CANTLOCK, Unable to lock object
MINOR, RESOURCE, H5E_CANTUNLOCK, Unable to unlock object
MINOR, RESOURCE, H5E_CANTGC, Unable to garbage collect
+MINOR, RESOURCE, H5E_CANTGETSIZE, Unable to compute size
# File accessability errors
MINOR, FILEACC, H5E_FILEEXISTS, File already exists
@@ -146,6 +147,11 @@ MINOR, CACHE, H5E_CANTSERIALIZE, Unable to serialize data from cache
MINOR, CACHE, H5E_CANTLOAD, Unable to load metadata into cache
MINOR, CACHE, H5E_PROTECT, Protected metadata error
MINOR, CACHE, H5E_NOTCACHED, Metadata not currently cached
+MINOR, CACHE, H5E_SYSTEM, Internal error detected
+MINOR, CACHE, H5E_CANTINS, Unable to insert metadata into cache
+MINOR, CACHE, H5E_CANTRENAME, Unable to rename metadata
+MINOR, CACHE, H5E_CANTPROTECT, Unable to protect metadata
+MINOR, CACHE, H5E_CANTUNPROTECT, Unable to unprotect metadata
# B-tree related errors
MINOR, BTREE, H5E_NOTFOUND, Object not found
diff --git a/src/Makefile.in b/src/Makefile.in
index 4dcb74a..dc59c33 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -30,7 +30,7 @@ LIB=libhdf5.la
DISTCLEAN=libhdf5.settings
## Source and object files for the library (lexicographically)...
-LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5D.c H5Dcontig.c H5Dcompact.c H5Dio.c \
+LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5C.c H5D.c H5Dcontig.c H5Dcompact.c H5Dio.c \
H5Distore.c H5Dseq.c H5Dtest.c H5E.c H5F.c H5Fdbg.c H5FD.c \
H5FDcore.c H5FDfamily.c H5FDfphdf5.c H5FDgass.c H5FDlog.c H5FDmpi.c \
H5FDmpio.c H5FDmpiposix.c H5FDmulti.c H5FDsec2.c H5FDsrb.c \
@@ -54,23 +54,25 @@ LIB_OBJ=$(LIB_SRC:.c=.lo)
MOSTLYCLEAN=H5detect.o H5detect.lo H5detect H5Tinit.o H5Tinit.lo H5Tinit.c
## Public header files (to be installed)...
-PUB_HDR=H5public.h H5Apublic.h H5ACpublic.h H5Bpublic.h H5Dpublic.h \
- H5Epubgen.h H5Epublic.h H5Fpublic.h H5FDpublic.h H5FDcore.h \
- H5FDfamily.h H5FDfphdf5.h H5FDgass.h H5FDlog.h H5FDmpi.h H5FDmpio.h \
- H5FDmpiposix.h H5FDmulti.h H5FDsec2.h H5FDsrb.h H5FDstdio.h \
- H5FDstream.h H5FPpublic.h H5Gpublic.h H5HGpublic.h H5HLpublic.h \
- H5Ipublic.h H5MMpublic.h H5Opublic.h H5Ppublic.h H5Rpublic.h \
- H5Spublic.h H5Tpublic.h H5Zpublic.h H5pubconf.h hdf5.h H5api_adpt.h
+PUB_HDR=H5public.h H5Apublic.h H5ACpublic.h H5Bpublic.h H5Cpublic.h \
+ H5Dpublic.h H5Epubgen.h H5Epublic.h H5Fpublic.h H5FDpublic.h \
+ H5FDcore.h H5FDfamily.h H5FDfphdf5.h H5FDgass.h H5FDlog.h H5FDmpi.h \
+ H5FDmpio.h H5FDmpiposix.h H5FDmulti.h H5FDsec2.h H5FDsrb.h \
+ H5FDstdio.h H5FDstream.h H5FPpublic.h H5Gpublic.h H5HGpublic.h \
+ H5HLpublic.h H5Ipublic.h H5MMpublic.h H5Opublic.h H5Ppublic.h \
+ H5Rpublic.h H5Spublic.h H5Tpublic.h H5Zpublic.h H5pubconf.h hdf5.h \
+ H5api_adpt.h
## Other header files (not to be installed)...
PRIVATE_HDR=H5private.h H5Aprivate.h H5Apkg.h H5ACprivate.h H5Bprivate.h \
- H5Dprivate.h H5Edefin.h H5Einit.h H5Eprivate.h H5Eterm.h H5Fprivate.h \
- H5FDprivate.h H5FLprivate.h H5FOprivate.h H5FPprivate.h H5FSprivate.h \
- H5Gprivate.h H5Gpkg.h H5HGprivate.h H5HGpkg.h H5HLprivate.h H5HLpkg.h \
- H5HPprivate.h H5Iprivate.h H5MFprivate.h H5MMprivate.h H5Oprivate.h \
- H5Opkg.h H5Pprivate.h H5Ppkg.h H5Rprivate.h H5RSprivate.h \
- H5Sprivate.h H5STprivate.h H5Tprivate.h H5TBprivate.h H5Tpkg.h \
- H5TSprivate.h H5Vprivate.h H5Zprivate.h H5Zpkg.h H5config.h
+ H5Cprivate.h H5Dprivate.h H5Edefin.h H5Einit.h H5Eprivate.h H5Eterm.h \
+ H5Fprivate.h H5FDprivate.h H5FLprivate.h H5FOprivate.h H5FPprivate.h \
+ H5FSprivate.h H5Gprivate.h H5Gpkg.h H5HGprivate.h H5HGpkg.h \
+ H5HLprivate.h H5HLpkg.h H5HPprivate.h H5Iprivate.h H5MFprivate.h \
+ H5MMprivate.h H5Oprivate.h H5Opkg.h H5Pprivate.h H5Ppkg.h \
+ H5Rprivate.h H5RSprivate.h H5Sprivate.h H5STprivate.h H5Tprivate.h \
+ H5TBprivate.h H5Tpkg.h H5TSprivate.h H5Vprivate.h H5Zprivate.h \
+ H5Zpkg.h H5config.h
## Error header generation
##
diff --git a/test/Makefile.in b/test/Makefile.in
index 7121290..1fe85e5 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -27,15 +27,16 @@ TEST_SCRIPTS=$(srcdir)/testerror.sh
## These are our main targets. They should be listed in the order to be
## executed, generally most specific tests to least specific tests.
-TEST_PROGS=testhdf5 lheap ohdr stab gheap hyperslab istore bittests dtypes \
- dsets cmpd_dset extend external links unlink big mtime fillval mount \
- flush1 flush2 enum gass_write gass_read gass_append set_extent \
- srb_write srb_append srb_read ttsafe stream_test getname file_handle \
- ntypes dangle dtransform filename
+TEST_PROGS=testhdf5 lheap ohdr stab gheap cache hyperslab istore bittests \
+ dtypes dsets cmpd_dset extend external links unlink big mtime \
+ fillval mount flush1 flush2 enum gass_write gass_read gass_append \
+ set_extent srb_write srb_append srb_read ttsafe stream_test \
+ getname file_handle ntypes dangle dtransform filename
## Test programs for Error API. Only compile them but let testerror.sh run
## them to compare the output error messages with standard ones. 'make check'
## doesn't run them directly.
+
ERR_PROGS=error_test err_compat
PROGS=$(ERR_PROGS) $(TEST_PROGS)
@@ -77,7 +78,7 @@ CLEAN=$(TIMINGS)
## other source lists are for the individual tests, the files of which may
## overlap with other tests.
-TEST_SRC=big.c bittests.c cmpd_dset.c dsets.c dtypes.c extend.c \
+TEST_SRC=big.c bittests.c cache.c cmpd_dset.c dsets.c dtypes.c extend.c \
external.c fillval.c flush1.c flush2.c gheap.c h5test.c hyperslab.c \
istore.c lheap.c links.c mount.c mtime.c ohdr.c stab.c tarray.c \
tattr.c tconfig.c testhdf5.c testmeta.c tfile.c \
@@ -127,6 +128,9 @@ testhdf5: $(TESTHDF5_OBJ)
lheap: lheap.lo
@$(LT_LINK_EXE) $(CFLAGS) -o $@ lheap.lo $(LIB) $(LIBHDF5) $(LDFLAGS) $(LIBS)
+cache: cache.lo
+ @$(LT_LINK_EXE) $(CFLAGS) -o $@ cache.lo $(LIB) $(LIBHDF5) $(LDFLAGS) $(LIBS)
+
ohdr: ohdr.lo
@$(LT_LINK_EXE) $(CFLAGS) -o $@ ohdr.lo $(LIB) $(LIBHDF5) $(LDFLAGS) $(LIBS)
diff --git a/test/cache.c b/test/cache.c
new file mode 100644
index 0000000..fae8e0b
--- /dev/null
+++ b/test/cache.c
@@ -0,0 +1,4067 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 6/9/04
+ *
+ * This file contains tests for the cache implemented in
+ * H5C.c
+ */
+#include "h5test.h"
+#include "H5Iprivate.h"
+
+const char *FILENAME[] = {
+ "cache",
+ NULL
+};
+
+#include "H5TBprivate.h"
+#include "H5Cprivate.h"
+
+/* with apologies for the abuse of terminology... */
+
+#define PICO_ENTRY_TYPE 0
+#define NANO_ENTRY_TYPE 1
+#define MICRO_ENTRY_TYPE 2
+#define TINY_ENTRY_TYPE 3
+#define SMALL_ENTRY_TYPE 4
+#define MEDIUM_ENTRY_TYPE 5
+#define LARGE_ENTRY_TYPE 6
+#define HUGE_ENTRY_TYPE 7
+#define MONSTER_ENTRY_TYPE 8
+
+#define NUMBER_OF_ENTRY_TYPES 9
+
+#define PICO_ENTRY_SIZE (size_t)1
+#define NANO_ENTRY_SIZE (size_t)4
+#define MICRO_ENTRY_SIZE (size_t)16
+#define TINY_ENTRY_SIZE (size_t)64
+#define SMALL_ENTRY_SIZE (size_t)256
+#define MEDIUM_ENTRY_SIZE (size_t)1024
+#define LARGE_ENTRY_SIZE (size_t)(4 * 1024)
+#define HUGE_ENTRY_SIZE (size_t)(16 * 1024)
+#define MONSTER_ENTRY_SIZE (size_t)(64 * 1024)
+
+#define NUM_PICO_ENTRIES (10 * 1024)
+#define NUM_NANO_ENTRIES (10 * 1024)
+#define NUM_MICRO_ENTRIES (10 * 1024)
+#define NUM_TINY_ENTRIES (10 * 1024)
+#define NUM_SMALL_ENTRIES (10 * 1024)
+#define NUM_MEDIUM_ENTRIES (10 * 1024)
+#define NUM_LARGE_ENTRIES (10 * 1024)
+#define NUM_HUGE_ENTRIES (10 * 1024)
+#define NUM_MONSTER_ENTRIES (10 * 1024)
+
+#define MAX_ENTRIES (10 * 1024)
+
+#define PICO_BASE_ADDR (haddr_t)0
+#define NANO_BASE_ADDR (haddr_t)(PICO_BASE_ADDR + \
+ (PICO_ENTRY_SIZE * NUM_PICO_ENTRIES))
+#define MICRO_BASE_ADDR (haddr_t)(NANO_BASE_ADDR + \
+ (NANO_ENTRY_SIZE * NUM_NANO_ENTRIES))
+#define TINY_BASE_ADDR (haddr_t)(MICRO_BASE_ADDR + \
+ (MICRO_ENTRY_SIZE * NUM_MICRO_ENTRIES))
+#define SMALL_BASE_ADDR (haddr_t)(TINY_BASE_ADDR + \
+ (TINY_ENTRY_SIZE * NUM_TINY_ENTRIES))
+#define MEDIUM_BASE_ADDR (haddr_t)(SMALL_BASE_ADDR + \
+ (SMALL_ENTRY_SIZE * NUM_SMALL_ENTRIES))
+#define LARGE_BASE_ADDR (haddr_t)(MEDIUM_BASE_ADDR + \
+ (MEDIUM_ENTRY_SIZE * NUM_MEDIUM_ENTRIES))
+#define HUGE_BASE_ADDR (haddr_t)(LARGE_BASE_ADDR + \
+ (LARGE_ENTRY_SIZE * NUM_LARGE_ENTRIES))
+#define MONSTER_BASE_ADDR (haddr_t)(HUGE_BASE_ADDR + \
+ (HUGE_ENTRY_SIZE * NUM_HUGE_ENTRIES))
+
+#define PICO_ALT_BASE_ADDR (haddr_t)(MONSTER_BASE_ADDR + \
+ (MONSTER_ENTRY_SIZE * NUM_MONSTER_ENTRIES))
+#define NANO_ALT_BASE_ADDR (haddr_t)(PICO_ALT_BASE_ADDR + \
+ (PICO_ENTRY_SIZE * NUM_PICO_ENTRIES))
+#define MICRO_ALT_BASE_ADDR (haddr_t)(NANO_ALT_BASE_ADDR + \
+ (NANO_ENTRY_SIZE * NUM_NANO_ENTRIES))
+#define TINY_ALT_BASE_ADDR (haddr_t)(MICRO_ALT_BASE_ADDR + \
+ (MICRO_ENTRY_SIZE * NUM_MICRO_ENTRIES))
+#define SMALL_ALT_BASE_ADDR (haddr_t)(TINY_ALT_BASE_ADDR + \
+ (TINY_ENTRY_SIZE * NUM_TINY_ENTRIES))
+#define MEDIUM_ALT_BASE_ADDR (haddr_t)(SMALL_ALT_BASE_ADDR + \
+ (SMALL_ENTRY_SIZE * NUM_SMALL_ENTRIES))
+#define LARGE_ALT_BASE_ADDR (haddr_t)(MEDIUM_ALT_BASE_ADDR + \
+ (MEDIUM_ENTRY_SIZE * NUM_MEDIUM_ENTRIES))
+#define HUGE_ALT_BASE_ADDR (haddr_t)(LARGE_ALT_BASE_ADDR + \
+ (LARGE_ENTRY_SIZE * NUM_LARGE_ENTRIES))
+#define MONSTER_ALT_BASE_ADDR (haddr_t)(HUGE_ALT_BASE_ADDR + \
+ (HUGE_ENTRY_SIZE * NUM_HUGE_ENTRIES))
+
+typedef struct test_entry_t
+{
+ H5C_cache_entry_t header; /* entry data used by the cache
+ * -- must be first
+ */
+ struct test_entry_t * self; /* pointer to this entry -- used for
+ * sanity checking.
+ */
+ haddr_t addr; /* where the cache thinks this entry
+ * is located
+ */
+    hbool_t		at_main_addr;	/* boolean flag indicating whether
+					 * the entry is supposed to be at
+					 * its main address (TRUE) or its
+					 * alternate address (FALSE).
+					 */
+ haddr_t main_addr; /* initial location of the entry
+ */
+ haddr_t alt_addr; /* location to which the entry
+ * can be relocated or "renamed"
+ */
+ size_t size; /* how big the cache thinks this
+ * entry is
+ */
+ int32_t type; /* indicates which entry array this
+ * entry is in
+ */
+ int32_t index; /* index in its entry array
+ */
+ int32_t reads; /* number of times this entry has
+ * been loaded.
+ */
+ int32_t writes; /* number of times this entry has
+ * been written
+ */
+ hbool_t dirty; /* entry has been modified since
+ * last write
+ */
+ hbool_t protected; /* entry should currently be on
+ * the cache's protected list.
+ */
+} test_entry_t;
+
+
+/* The following is a local copy of the H5C_t structure -- any changes in
+ * that structure must be reproduced here. The typedef is used to allow
+ * local access to the cache's private data.
+ */
+
+#define H5C__H5C_T_MAGIC 0x005CAC0E
+#define H5C__MAX_NUM_TYPE_IDS 9
+
+typedef struct local_H5C_t
+{
+ uint32_t magic;
+
+ int32_t max_type_id;
+ const char * (* type_name_table_ptr)[];
+
+ size_t max_cache_size;
+ size_t min_clean_size;
+
+ H5C_write_permitted_func_t check_write_permitted;
+
+ int32_t index_len;
+ size_t index_size;
+ H5TB_TREE * index_tree_ptr;
+
+ int32_t pl_len;
+ size_t pl_size;
+ H5C_cache_entry_t * pl_head_ptr;
+ H5C_cache_entry_t * pl_tail_ptr;
+
+ int32_t LRU_list_len;
+ size_t LRU_list_size;
+ H5C_cache_entry_t * LRU_head_ptr;
+ H5C_cache_entry_t * LRU_tail_ptr;
+
+ int32_t cLRU_list_len;
+ size_t cLRU_list_size;
+ H5C_cache_entry_t * cLRU_head_ptr;
+ H5C_cache_entry_t * cLRU_tail_ptr;
+
+ int32_t dLRU_list_len;
+ size_t dLRU_list_size;
+ H5C_cache_entry_t * dLRU_head_ptr;
+ H5C_cache_entry_t * dLRU_tail_ptr;
+
+#if H5C_COLLECT_CACHE_STATS
+
+ /* stats fields */
+ int64_t hits[H5C__MAX_NUM_TYPE_IDS];
+ int64_t misses[H5C__MAX_NUM_TYPE_IDS];
+ int64_t insertions[H5C__MAX_NUM_TYPE_IDS];
+ int64_t clears[H5C__MAX_NUM_TYPE_IDS];
+ int64_t flushes[H5C__MAX_NUM_TYPE_IDS];
+ int64_t evictions[H5C__MAX_NUM_TYPE_IDS];
+ int64_t renames[H5C__MAX_NUM_TYPE_IDS];
+
+ int32_t max_index_len;
+ size_t max_index_size;
+
+ int32_t max_pl_len;
+ size_t max_pl_size;
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+
+ int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS];
+ int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS];
+ int32_t max_clears[H5C__MAX_NUM_TYPE_IDS];
+ int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS];
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ hbool_t skip_file_checks;
+ hbool_t skip_dxpl_id_checks;
+
+} local_H5C_t;
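+
+/* Illustrative sketch only -- not compiled into the test.  It shows how
+ * the local_H5C_t typedef above is used to peek at the cache's private
+ * fields from test code (see entry_in_cache() below for a real use).
+ * The example_* function exists only for illustration; cache_ptr is
+ * assumed to have been obtained from H5C_create().
+ */
+#if 0
+static H5TB_TREE *
+example_get_index_tree(H5C_t * cache_ptr)
+{
+    return(((local_H5C_t *)cache_ptr)->index_tree_ptr);
+}
+#endif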
+
+
+/* global variable declarations: */
+
+static hbool_t write_permitted = TRUE;
+static hbool_t pass = TRUE; /* set to false on error */
+const char *failure_mssg = NULL;
+
+test_entry_t pico_entries[NUM_PICO_ENTRIES];
+test_entry_t nano_entries[NUM_NANO_ENTRIES];
+test_entry_t micro_entries[NUM_MICRO_ENTRIES];
+test_entry_t tiny_entries[NUM_TINY_ENTRIES];
+test_entry_t small_entries[NUM_SMALL_ENTRIES];
+test_entry_t medium_entries[NUM_MEDIUM_ENTRIES];
+test_entry_t large_entries[NUM_LARGE_ENTRIES];
+test_entry_t huge_entries[NUM_HUGE_ENTRIES];
+test_entry_t monster_entries[NUM_MONSTER_ENTRIES];
+
+test_entry_t * entries[NUMBER_OF_ENTRY_TYPES] =
+{
+ pico_entries,
+ nano_entries,
+ micro_entries,
+ tiny_entries,
+ small_entries,
+ medium_entries,
+ large_entries,
+ huge_entries,
+ monster_entries
+};
+
+const int32_t max_indices[NUMBER_OF_ENTRY_TYPES] =
+{
+ NUM_PICO_ENTRIES - 1,
+ NUM_NANO_ENTRIES - 1,
+ NUM_MICRO_ENTRIES - 1,
+ NUM_TINY_ENTRIES - 1,
+ NUM_SMALL_ENTRIES - 1,
+ NUM_MEDIUM_ENTRIES - 1,
+ NUM_LARGE_ENTRIES - 1,
+ NUM_HUGE_ENTRIES - 1,
+ NUM_MONSTER_ENTRIES - 1
+};
+
+const size_t entry_sizes[NUMBER_OF_ENTRY_TYPES] =
+{
+ PICO_ENTRY_SIZE,
+ NANO_ENTRY_SIZE,
+ MICRO_ENTRY_SIZE,
+ TINY_ENTRY_SIZE,
+ SMALL_ENTRY_SIZE,
+ MEDIUM_ENTRY_SIZE,
+ LARGE_ENTRY_SIZE,
+ HUGE_ENTRY_SIZE,
+ MONSTER_ENTRY_SIZE
+};
+
+const haddr_t base_addrs[NUMBER_OF_ENTRY_TYPES] =
+{
+ PICO_BASE_ADDR,
+ NANO_BASE_ADDR,
+ MICRO_BASE_ADDR,
+ TINY_BASE_ADDR,
+ SMALL_BASE_ADDR,
+ MEDIUM_BASE_ADDR,
+ LARGE_BASE_ADDR,
+ HUGE_BASE_ADDR,
+ MONSTER_BASE_ADDR
+};
+
+const haddr_t alt_base_addrs[NUMBER_OF_ENTRY_TYPES] =
+{
+ PICO_ALT_BASE_ADDR,
+ NANO_ALT_BASE_ADDR,
+ MICRO_ALT_BASE_ADDR,
+ TINY_ALT_BASE_ADDR,
+ SMALL_ALT_BASE_ADDR,
+ MEDIUM_ALT_BASE_ADDR,
+ LARGE_ALT_BASE_ADDR,
+ HUGE_ALT_BASE_ADDR,
+ MONSTER_ALT_BASE_ADDR
+};
+
+const char * entry_type_names[NUMBER_OF_ENTRY_TYPES] =
+{
+ "pico entries -- 1 B",
+ "nano entries -- 4 B",
+ "micro entries -- 16 B",
+ "tiny entries -- 64 B",
+ "small entries -- 256 B",
+ "medium entries -- 1 KB",
+ "large entries -- 4 KB",
+ "huge entries -- 16 KB",
+ "monster entries -- 64 KB"
+};
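+
+/* Illustrative sketch only -- not compiled into the test.  The tables
+ * above define the test bed's address space:  each entry type gets a
+ * contiguous block of main addresses starting at base_addrs[type], and
+ * a parallel block of alternate addresses starting at
+ * alt_base_addrs[type].  Thus an entry's address is just the region
+ * base plus its index times its entry size, as sketched below (see
+ * also type_and_index_to_addr() and addr_to_type_and_index()).  The
+ * example_* function exists only for illustration.
+ */
+#if 0
+static haddr_t
+example_entry_addr(int32_t type, int32_t idx, hbool_t at_main_addr)
+{
+    HDassert( ( type >= 0 ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+    HDassert( ( idx >= 0 ) && ( idx <= max_indices[type] ) );
+
+    if ( at_main_addr )
+
+        return(base_addrs[type] + (((haddr_t)idx) * entry_sizes[type]));
+
+    else
+
+        return(alt_base_addrs[type] + (((haddr_t)idx) * entry_sizes[type]));
+}
+#endif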
+
+
+/* call back function declarations: */
+
+static herr_t check_write_permitted(H5F_t UNUSED * f,
+ hid_t UNUSED dxpl_id,
+ hbool_t * write_permitted_ptr);
+
+static herr_t clear(H5F_t * f, void * thing, hbool_t dest);
+
+herr_t pico_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t nano_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t micro_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t tiny_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t small_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t medium_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t large_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t huge_clear(H5F_t * f, void * thing, hbool_t dest);
+herr_t monster_clear(H5F_t * f, void * thing, hbool_t dest);
+
+
+static herr_t destroy(H5F_t UNUSED * f, void * thing);
+
+herr_t pico_dest(H5F_t * f, void * thing);
+herr_t nano_dest(H5F_t * f, void * thing);
+herr_t micro_dest(H5F_t * f, void * thing);
+herr_t tiny_dest(H5F_t * f, void * thing);
+herr_t small_dest(H5F_t * f, void * thing);
+herr_t medium_dest(H5F_t * f, void * thing);
+herr_t large_dest(H5F_t * f, void * thing);
+herr_t huge_dest(H5F_t * f, void * thing);
+herr_t monster_dest(H5F_t * f, void * thing);
+
+
+static herr_t flush(H5F_t *f, hid_t UNUSED dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+
+herr_t pico_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t nano_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t micro_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t tiny_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t small_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t medium_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t large_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t huge_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+herr_t monster_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing);
+
+
+static void * load(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr,
+ const void UNUSED *udata1, void UNUSED *udata2);
+
+void * pico_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * nano_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * micro_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * tiny_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * small_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * medium_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * large_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * huge_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+void * monster_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2);
+
+
+static herr_t size(H5F_t UNUSED * f, void * thing, size_t * size_ptr);
+
+herr_t pico_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t nano_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t micro_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t tiny_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t small_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t medium_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t large_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t huge_size(H5F_t * f, void * thing, size_t * size_ptr);
+herr_t monster_size(H5F_t * f, void * thing, size_t * size_ptr);
+
+
+/* callback table declaration */
+
+static const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
+{
+ {
+ PICO_ENTRY_TYPE,
+ (H5C_load_func_t)pico_load,
+ (H5C_flush_func_t)pico_flush,
+ (H5C_dest_func_t)pico_dest,
+ (H5C_clear_func_t)pico_clear,
+ (H5C_size_func_t)pico_size
+ },
+ {
+ NANO_ENTRY_TYPE,
+ (H5C_load_func_t)nano_load,
+ (H5C_flush_func_t)nano_flush,
+ (H5C_dest_func_t)nano_dest,
+ (H5C_clear_func_t)nano_clear,
+ (H5C_size_func_t)nano_size
+ },
+ {
+ MICRO_ENTRY_TYPE,
+ (H5C_load_func_t)micro_load,
+ (H5C_flush_func_t)micro_flush,
+ (H5C_dest_func_t)micro_dest,
+ (H5C_clear_func_t)micro_clear,
+ (H5C_size_func_t)micro_size
+ },
+ {
+ TINY_ENTRY_TYPE,
+ (H5C_load_func_t)tiny_load,
+ (H5C_flush_func_t)tiny_flush,
+ (H5C_dest_func_t)tiny_dest,
+ (H5C_clear_func_t)tiny_clear,
+ (H5C_size_func_t)tiny_size
+ },
+ {
+ SMALL_ENTRY_TYPE,
+ (H5C_load_func_t)small_load,
+ (H5C_flush_func_t)small_flush,
+ (H5C_dest_func_t)small_dest,
+ (H5C_clear_func_t)small_clear,
+ (H5C_size_func_t)small_size
+ },
+ {
+ MEDIUM_ENTRY_TYPE,
+ (H5C_load_func_t)medium_load,
+ (H5C_flush_func_t)medium_flush,
+ (H5C_dest_func_t)medium_dest,
+ (H5C_clear_func_t)medium_clear,
+ (H5C_size_func_t)medium_size
+ },
+ {
+ LARGE_ENTRY_TYPE,
+ (H5C_load_func_t)large_load,
+ (H5C_flush_func_t)large_flush,
+ (H5C_dest_func_t)large_dest,
+ (H5C_clear_func_t)large_clear,
+ (H5C_size_func_t)large_size
+ },
+ {
+ HUGE_ENTRY_TYPE,
+ (H5C_load_func_t)huge_load,
+ (H5C_flush_func_t)huge_flush,
+ (H5C_dest_func_t)huge_dest,
+ (H5C_clear_func_t)huge_clear,
+ (H5C_size_func_t)huge_size
+ },
+ {
+ MONSTER_ENTRY_TYPE,
+ (H5C_load_func_t)monster_load,
+ (H5C_flush_func_t)monster_flush,
+ (H5C_dest_func_t)monster_dest,
+ (H5C_clear_func_t)monster_clear,
+ (H5C_size_func_t)monster_size
+ }
+};
+
+
+/* private function declarations: */
+
+static void addr_to_type_and_index(haddr_t addr,
+ int32_t * type_ptr,
+ int32_t * index_ptr);
+
+#if 0 /* keep this for a while -- it may be useful */
+static haddr_t type_and_index_to_addr(int32_t type,
+ int32_t idx);
+#endif
+
+static void insert_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t dirty);
+
+static void rename_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t main_addr);
+
+static void protect_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+hbool_t entry_in_cache(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx);
+
+static void reset_entries(void);
+
+static H5C_t * setup_cache(size_t max_cache_size, size_t min_clean_size);
+
+static void row_major_scan_forward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ hbool_t do_renames,
+ hbool_t rename_to_main_addr,
+ hbool_t do_destroys,
+ int dirty_destroys,
+ int dirty_unprotects);
+
+static void row_major_scan_backward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ hbool_t do_renames,
+ hbool_t rename_to_main_addr,
+ hbool_t do_destroys,
+ int dirty_destroys,
+ int dirty_unprotects);
+
+static void col_major_scan_forward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects);
+
+static void col_major_scan_backward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects);
+
+static void smoke_check_1(void);
+static void smoke_check_2(void);
+static void smoke_check_3(void);
+static void smoke_check_4(void);
+static void write_permitted_check(void);
+static void check_flush_protected_err(void);
+static void check_destroy_protected_err(void);
+static void check_duplicate_insert_err(void);
+static void check_rename_err(void);
+static void check_double_protect_err(void);
+static void check_double_unprotect_err(void);
+
+static void takedown_cache(H5C_t * cache_ptr,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats);
+
+static void flush_cache(H5C_t * cache_ptr,
+ hbool_t destroy_entries,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats);
+
+static void unprotect_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ int dirty,
+ hbool_t deleted);
+
+static void verify_clean(void);
+
+static void verify_unprotected(void);
+
+
+
+/* address translation functions: */
+
+/*-------------------------------------------------------------------------
+ * Function: addr_to_type_and_index
+ *
+ * Purpose: Given an address, compute the type and index of the
+ * associated entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+addr_to_type_and_index(haddr_t addr,
+ int32_t * type_ptr,
+ int32_t * index_ptr)
+{
+ int i;
+ int32_t type;
+ int32_t idx;
+
+ HDassert( type_ptr );
+ HDassert( index_ptr );
+
+ /* we only have a small number of entry types, so just do a
+ * linear search. If NUMBER_OF_ENTRY_TYPES grows, we may want
+ * to do a binary search instead.
+ */
+ i = 1;
+ if ( addr >= PICO_ALT_BASE_ADDR ) {
+
+ while ( ( i < NUMBER_OF_ENTRY_TYPES ) &&
+ ( addr >= alt_base_addrs[i] ) )
+ {
+ i++;
+ }
+
+ } else {
+
+ while ( ( i < NUMBER_OF_ENTRY_TYPES ) &&
+ ( addr >= base_addrs[i] ) )
+ {
+ i++;
+ }
+ }
+
+ type = i - 1;
+
+ HDassert( ( type >= 0 ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+
+ if ( addr >= PICO_ALT_BASE_ADDR ) {
+
+ idx = (addr - alt_base_addrs[type]) / entry_sizes[type];
+ HDassert( !((entries[type])[idx].at_main_addr) );
+ HDassert( addr == (entries[type])[idx].alt_addr );
+
+ } else {
+
+ idx = (addr - base_addrs[type]) / entry_sizes[type];
+ HDassert( (entries[type])[idx].at_main_addr );
+ HDassert( addr == (entries[type])[idx].main_addr );
+ }
+
+ HDassert( ( idx >= 0 ) && ( idx <= max_indices[type] ) );
+
+ HDassert( addr == (entries[type])[idx].addr );
+
+ *type_ptr = type;
+ *index_ptr = idx;
+
+ return;
+
+} /* addr_to_type_and_index() */
+
+
+#if 0 /* This function has never been used, but we may want it
+       * some time.  Let's keep it for now.
+       */
+/*-------------------------------------------------------------------------
+ * Function: type_and_index_to_addr
+ *
+ * Purpose: Given a type and index of an entry, compute the associated
+ * addr and return that value.
+ *
+ * Return: computed addr
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static haddr_t
+type_and_index_to_addr(int32_t type,
+ int32_t idx)
+{
+ haddr_t addr;
+
+ HDassert( ( type >= 0 ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( idx >= 0 ) && ( idx <= max_indices[type] ) );
+
+ addr = base_addrs[type] + (((haddr_t)idx) * entry_sizes[type]);
+
+ HDassert( addr == (entries[type])[idx].addr );
+
+ if ( (entries[type])[idx].at_main_addr ) {
+
+ HDassert( addr == (entries[type])[idx].main_addr );
+
+ } else {
+
+ HDassert( addr == (entries[type])[idx].alt_addr );
+ }
+
+ return(addr);
+
+} /* type_and_index_to_addr() */
+
+#endif
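+
+/* Worked example (illustrative only -- not compiled into the test):
+ * assuming reset_entries() has initialized the entry arrays, the third
+ * nano entry at its main address lives at
+ * NANO_BASE_ADDR + (2 * NANO_ENTRY_SIZE), and addr_to_type_and_index()
+ * recovers the (type, index) pair by locating the enclosing address
+ * region and dividing the offset from the region base by the entry
+ * size.  The example_* function exists only for illustration.
+ */
+#if 0
+static void
+example_addr_translation(void)
+{
+    int32_t type;
+    int32_t idx;
+
+    addr_to_type_and_index(NANO_BASE_ADDR + (haddr_t)(2 * NANO_ENTRY_SIZE),
+                           &type, &idx);
+
+    HDassert( type == NANO_ENTRY_TYPE );
+    HDassert( idx == 2 );
+}
+#endif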
+
+
+/* Call back functions: */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function:    check_write_permitted
+ *
+ * Purpose:     Determine if a write is permitted under the current
+ *              circumstances, and set *write_permitted_ptr accordingly.
+ *              As a general rule it is, but when we are running in
+ *              parallel mode with collective I/O, we must ensure that
+ *              a read cannot cause a write.
+ *
+ *              In this test bed, the function simply reports the value
+ *              of the write_permitted global variable.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/15/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+check_write_permitted(H5F_t UNUSED * f,
+ hid_t UNUSED dxpl_id,
+ hbool_t * write_permitted_ptr)
+{
+
+ HDassert( write_permitted_ptr );
+ *write_permitted_ptr = write_permitted;
+
+ return(SUCCEED);
+
+} /* check_write_permitted() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: clear & friends
+ *
+ * Purpose: clear the entry. The helper functions verify that the
+ * correct version of clear is being called, and then call
+ * clear proper.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+clear(H5F_t * f,
+ void * thing,
+ hbool_t dest)
+{
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ HDassert( thing );
+
+ entry_ptr = (test_entry_t *)thing;
+ base_addr = entries[entry_ptr->type];
+
+ HDassert( entry_ptr->index >= 0 );
+ HDassert( entry_ptr->index <= max_indices[entry_ptr->type] );
+ HDassert( entry_ptr == &(base_addr[entry_ptr->index]) );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->header.addr == entry_ptr->addr );
+ HDassert( entry_ptr->header.size == entry_ptr->size );
+ HDassert( entry_ptr->size == entry_sizes[entry_ptr->type] );
+
+ entry_ptr->header.dirty = FALSE;
+ entry_ptr->dirty = FALSE;
+
+ if ( dest ) {
+
+ destroy(f, thing);
+
+ }
+
+ return(SUCCEED);
+
+} /* clear() */
+
+herr_t
+pico_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+nano_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+micro_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+tiny_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+small_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+medium_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+large_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+huge_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+herr_t
+monster_clear(H5F_t * f, void * thing, hbool_t dest)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+ return(clear(f, thing, dest));
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: dest & friends
+ *
+ * Purpose: Destroy the entry. The helper functions verify that the
+ * correct version of dest is being called, and then call
+ * dest proper.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+destroy(H5F_t UNUSED * f,
+ void * thing)
+{
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ HDassert( thing );
+
+ entry_ptr = (test_entry_t *)thing;
+ base_addr = entries[entry_ptr->type];
+
+ HDassert ( entry_ptr->index >= 0 );
+ HDassert ( entry_ptr->index <= max_indices[entry_ptr->type] );
+ HDassert( entry_ptr == &(base_addr[entry_ptr->index]) );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->header.addr == entry_ptr->addr );
+ HDassert( entry_ptr->header.size == entry_ptr->size );
+ HDassert( entry_ptr->size == entry_sizes[entry_ptr->type] );
+
+ HDassert( !(entry_ptr->dirty) );
+ HDassert( !(entry_ptr->header.dirty) );
+
+ return(SUCCEED);
+
+} /* destroy() */
+
+herr_t
+pico_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+nano_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+micro_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+tiny_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+small_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+medium_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+large_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+huge_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+herr_t
+monster_dest(H5F_t * f, void * thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+ return(destroy(f, thing));
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: flush & friends
+ *
+ * Purpose: flush the entry and mark it as clean. The helper functions
+ * verify that the correct version of flush is being called,
+ * and then call flush proper.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+flush(H5F_t *f,
+ hid_t UNUSED dxpl_id,
+ hbool_t dest,
+ haddr_t addr,
+ void *thing)
+{
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ HDassert( thing );
+
+ entry_ptr = (test_entry_t *)thing;
+ base_addr = entries[entry_ptr->type];
+
+ HDassert( entry_ptr->index >= 0 );
+ HDassert( entry_ptr->index <= max_indices[entry_ptr->type] );
+ HDassert( entry_ptr == &(base_addr[entry_ptr->index]) );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->header.addr == entry_ptr->addr );
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->header.size == entry_ptr->size );
+ HDassert( entry_ptr->size == entry_sizes[entry_ptr->type] );
+
+ if ( ( ! write_permitted ) && ( entry_ptr->dirty ) ) {
+
+ pass = FALSE;
+ failure_mssg = "called flush when write_permitted is FALSE.";
+ }
+
+ if ( entry_ptr->dirty ) {
+
+ (entry_ptr->writes)++;
+ entry_ptr->dirty = FALSE;
+ entry_ptr->header.dirty = FALSE;
+ }
+
+ if ( dest ) {
+
+ destroy(f, thing);
+
+ }
+
+ return(SUCCEED);
+
+} /* flush() */
+
+herr_t
+pico_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+nano_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+micro_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+tiny_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+small_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+medium_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+large_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+huge_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+herr_t
+monster_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+ return(flush(f, dxpl_id, dest, addr, thing));
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: load & friends
+ *
+ * Purpose: "load" the requested entry and mark it as clean. The
+ * helper functions verify that the correct version of load
+ * is being called, and then call load proper.
+ *
+ * Return:      Pointer to the loaded entry.
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void *
+load(H5F_t UNUSED *f,
+ hid_t UNUSED dxpl_id,
+ haddr_t addr,
+ const void UNUSED *udata1,
+ void UNUSED *udata2)
+{
+ int32_t type;
+ int32_t idx;
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ addr_to_type_and_index(addr, &type, &idx);
+
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr->type >= 0 );
+ HDassert( entry_ptr->type < NUMBER_OF_ENTRY_TYPES );
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->index >= 0 );
+ HDassert( entry_ptr->index <= max_indices[type] );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->addr == addr );
+ HDassert( entry_ptr->size == entry_sizes[type] );
+
+ entry_ptr->dirty = FALSE;
+
+ (entry_ptr->reads)++;
+
+ return(entry_ptr);
+
+} /* load() */
+
+void *
+pico_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+nano_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+micro_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+tiny_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+small_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+medium_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+large_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+huge_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+void *
+monster_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ const void *udata1, void *udata2)
+{
+ return(load(f, dxpl_id, addr, udata1, udata2));
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: size & friends
+ *
+ * Purpose: Get the size of the specified entry. The helper functions
+ * verify that the correct version of size is being called,
+ * and then call size proper.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static herr_t
+size(H5F_t UNUSED * f,
+ void * thing,
+ size_t * size_ptr)
+{
+ test_entry_t * entry_ptr;
+ test_entry_t * base_addr;
+
+ HDassert( size_ptr );
+ HDassert( thing );
+
+ entry_ptr = (test_entry_t *)thing;
+ base_addr = entries[entry_ptr->type];
+
+ HDassert( entry_ptr->index >= 0 );
+ HDassert( entry_ptr->index <= max_indices[entry_ptr->type] );
+ HDassert( entry_ptr == &(base_addr[entry_ptr->index]) );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->header.addr == entry_ptr->addr );
+ HDassert( entry_ptr->size == entry_sizes[entry_ptr->type] );
+
+ *size_ptr = entry_ptr->size;
+
+ return(SUCCEED);
+
+} /* size() */
+
+herr_t
+pico_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == PICO_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+nano_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == NANO_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+micro_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MICRO_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+tiny_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == TINY_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+small_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == SMALL_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+medium_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MEDIUM_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+large_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == LARGE_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+huge_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == HUGE_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+herr_t
+monster_size(H5F_t * f, void * thing, size_t * size_ptr)
+{
+ HDassert ( ((test_entry_t *)thing)->type == MONSTER_ENTRY_TYPE );
+ return(size(f, thing, size_ptr));
+}
+
+
+/**************************************************************************/
+/**************************************************************************/
+/************************** test utility functions: ***********************/
+/**************************************************************************/
+/**************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: entry_in_cache
+ *
+ * Purpose: Given a pointer to a cache, an entry type, and an index,
+ * determine if the entry is currently in the cache.
+ *
+ * Return: TRUE if the entry is in the cache, and FALSE otherwise.
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+hbool_t
+entry_in_cache(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx)
+{
+ hbool_t in_cache = FALSE; /* will set to TRUE if necessary */
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+ test_entry_t search_target;
+ H5TB_TREE * index_tree_ptr;
+ H5TB_NODE * node_ptr = NULL;
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) );
+
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+
+ search_target.header.addr = entry_ptr->addr;
+
+ index_tree_ptr = ((local_H5C_t *)cache_ptr)->index_tree_ptr;
+
+ node_ptr = H5TB_dfind(index_tree_ptr, &search_target, NULL);
+
+ if ( node_ptr != NULL ) {
+
+ in_cache = TRUE;
+ HDassert( entry_ptr->addr == entry_ptr->header.addr );
+ HDassert( node_ptr->key == ((void *)(entry_ptr)) );
+ }
+
+ return(in_cache);
+
+} /* entry_in_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: reset_entries
+ *
+ * Purpose:     reset the contents of the entries arrays to known values.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+reset_entries(void)
+
+{
+ int i;
+ int j;
+ int32_t max_index;
+ haddr_t addr = 0;
+ haddr_t alt_addr = PICO_ALT_BASE_ADDR;
+ size_t entry_size;
+ test_entry_t * base_addr;
+
+ for ( i = 0; i < NUMBER_OF_ENTRY_TYPES; i++ )
+ {
+ entry_size = entry_sizes[i];
+ max_index = max_indices[i];
+ base_addr = entries[i];
+
+ HDassert( base_addr );
+
+ for ( j = 0; j <= max_index; j++ )
+ {
+ /* one can argue that we should fill the header with garbage.
+ * If this is desired, we can simply comment out the header
+ * initialization - the headers will be full of garbage soon
+ * enough.
+ */
+
+ base_addr[j].header.addr = (haddr_t)0;
+ base_addr[j].header.size = (size_t)0;
+ base_addr[j].header.type = NULL;
+ base_addr[j].header.dirty = FALSE;
+ base_addr[j].header.protected = FALSE;
+ base_addr[j].header.next = NULL;
+ base_addr[j].header.prev = NULL;
+ base_addr[j].header.aux_next = NULL;
+ base_addr[j].header.aux_prev = NULL;
+
+ base_addr[j].self = &(base_addr[j]);
+ base_addr[j].addr = addr;
+ base_addr[j].at_main_addr = TRUE;
+ base_addr[j].main_addr = addr;
+ base_addr[j].alt_addr = alt_addr;
+ base_addr[j].size = entry_size;
+ base_addr[j].type = i;
+ base_addr[j].index = j;
+ base_addr[j].reads = 0;
+ base_addr[j].writes = 0;
+ base_addr[j].dirty = FALSE;
+ base_addr[j].protected = FALSE;
+
+ addr += (haddr_t)entry_size;
+ alt_addr += (haddr_t)entry_size;
+ }
+ }
+
+ return;
+
+} /* reset_entries() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_clean
+ *
+ * Purpose: Verify that all cache entries are marked as clean. If any
+ * are not, set pass to FALSE.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+verify_clean(void)
+
+{
+ int i;
+ int j;
+ int dirty_count = 0;
+ int32_t max_index;
+ test_entry_t * base_addr;
+
+ if ( pass ) {
+
+ for ( i = 0; i < NUMBER_OF_ENTRY_TYPES; i++ )
+ {
+ max_index = max_indices[i];
+ base_addr = entries[i];
+
+ HDassert( base_addr );
+
+ for ( j = 0; j <= max_index; j++ )
+ {
+ if ( ( base_addr[j].header.dirty ) || ( base_addr[j].dirty ) ) {
+
+ dirty_count++;
+ }
+ }
+ }
+
+ if ( dirty_count > 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "verify_clean() found dirty entry(s).";
+ }
+ }
+
+ return;
+
+} /* verify_clean() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_unprotected
+ *
+ * Purpose: Verify that no cache entries are marked as protected. If
+ * any are, set pass to FALSE.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/10/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+verify_unprotected(void)
+
+{
+ int i;
+ int j;
+ int protected_count = 0;
+ int32_t max_index;
+ test_entry_t * base_addr;
+
+ if ( pass ) {
+
+ for ( i = 0; i < NUMBER_OF_ENTRY_TYPES; i++ )
+ {
+ max_index = max_indices[i];
+ base_addr = entries[i];
+
+ HDassert( base_addr );
+
+ for ( j = 0; j <= max_index; j++ )
+ {
+ HDassert( base_addr[j].header.protected ==
+ base_addr[j].protected );
+
+ if ( ( base_addr[j].header.protected ) ||
+ ( base_addr[j].protected ) ) {
+
+ protected_count++;
+ }
+ }
+ }
+
+ if ( protected_count > 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "verify_unprotected() found protected entry(s).";
+ }
+ }
+
+ return;
+
+} /* verify_unprotected() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: setup_cache()
+ *
+ * Purpose: Allocate a cache of the desired size and configure it for
+ * use in the test bed. Return a pointer to the new cache
+ * structure.
+ *
+ * Return: Pointer to new cache, or NULL on failure.
+ *
+ * Programmer: John Mainzer
+ * 6/11/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static H5C_t *
+setup_cache(size_t max_cache_size,
+ size_t min_clean_size)
+{
+ H5C_t * cache_ptr = NULL;
+
+ cache_ptr = H5C_create(max_cache_size,
+ min_clean_size,
+ (NUMBER_OF_ENTRY_TYPES - 1),
+ &entry_type_names,
+ check_write_permitted);
+
+ if ( cache_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "H5C_create() returned NULL.";
+
+ } else {
+
+ H5C_set_skip_flags(cache_ptr, TRUE, TRUE);
+ }
+
+ return(cache_ptr);
+
+} /* setup_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: takedown_cache()
+ *
+ * Purpose:     Flush the specified cache and destroy it.  If requested,
+ *              dump stats first.  If pass is FALSE, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/11/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+takedown_cache(H5C_t * cache_ptr,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats)
+{
+ HDassert(cache_ptr);
+
+ if ( pass ) {
+
+ if ( dump_stats ) {
+
+ H5C_stats(cache_ptr, "test cache", dump_detailed_stats);
+ }
+
+ H5C_dest(NULL, -1, -1, cache_ptr);
+ }
+
+ return;
+
+} /* takedown_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: flush_cache()
+ *
+ * Purpose:     Flush the specified cache, destroying all entries if
+ *              requested.  If requested, dump stats first.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/23/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+flush_cache(H5C_t * cache_ptr,
+ hbool_t destroy_entries,
+ hbool_t dump_stats,
+ hbool_t dump_detailed_stats)
+{
+ herr_t result = 0;
+
+ HDassert(cache_ptr);
+
+ verify_unprotected();
+
+ if ( pass ) {
+
+ if ( destroy_entries ) {
+
+ result = H5C_flush_cache(NULL, -1, -1, cache_ptr,
+ H5F_FLUSH_INVALIDATE);
+
+ } else {
+
+ result = H5C_flush_cache(NULL, -1, -1, cache_ptr, 0);
+ }
+ }
+
+ if ( dump_stats ) {
+
+ H5C_stats(cache_ptr, "test cache", dump_detailed_stats);
+ }
+
+ if ( result < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "error in H5C_flush_cache().";
+ }
+
+ return;
+
+} /* flush_cache() */
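+
+/* Typical test bed life cycle (illustrative sketch only -- not compiled
+ * into the test).  The smoke checks below all follow roughly this
+ * pattern; the cache sizes and entry choices here are arbitrary example
+ * values, and the example_* function exists only for illustration.
+ */
+#if 0
+static void
+example_test_skeleton(void)
+{
+    H5C_t * cache_ptr = NULL;
+
+    pass = TRUE;
+
+    reset_entries();
+
+    cache_ptr = setup_cache((size_t)(1 * 1024 * 1024),
+                            (size_t)(512 * 1024));
+
+    /* exercise the cache ... */
+    insert_entry(cache_ptr, PICO_ENTRY_TYPE, 0, FALSE);
+    protect_entry(cache_ptr, PICO_ENTRY_TYPE, 1);
+    unprotect_entry(cache_ptr, PICO_ENTRY_TYPE, 1, FALSE, FALSE);
+
+    /* flush without destroying entries, then verify and tear down */
+    flush_cache(cache_ptr, FALSE, FALSE, FALSE);
+    verify_clean();
+    verify_unprotected();
+
+    takedown_cache(cache_ptr, FALSE, FALSE);
+}
+#endif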
+
+
+/*-------------------------------------------------------------------------
+ * Function: insert_entry()
+ *
+ * Purpose: Insert the entry indicated by the type and index. Mark
+ * it clean or dirty as indicated.
+ *
+ * Note that I don't see much practical use for inserting
+ * a clean entry, but the interface permits it so we should
+ * test it.
+ *
+ * Do nothing if pass is false.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/16/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+insert_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t dirty)
+{
+ herr_t result;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ if ( pass ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) );
+
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( !(entry_ptr->protected) );
+
+ if ( dirty ) {
+
+ (entry_ptr->header).dirty = dirty;
+ entry_ptr->dirty = dirty;
+ }
+
+ result = H5C_insert_entry(NULL, -1, -1, cache_ptr, &(types[type]),
+ entry_ptr->addr, (void *)entry_ptr);
+
+ if ( ( result < 0 ) ||
+ ( entry_ptr->header.protected ) ||
+ ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+ pass = FALSE;
+ failure_mssg = "error in H5C_insert().";
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+ }
+
+ return;
+
+} /* insert_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: rename_entry()
+ *
+ * Purpose: Rename the entry indicated by the type and index to its
+ * main or alternate address as indicated. If the entry is
+ *              already at the desired address, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/21/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+rename_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ hbool_t main_addr)
+{
+ herr_t result;
+ hbool_t done = TRUE; /* will set to FALSE if we have work to do */
+ haddr_t old_addr;
+ haddr_t new_addr;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) );
+
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( !(entry_ptr->protected) );
+ HDassert( !(entry_ptr->header.protected) );
+
+ if ( entry_ptr->at_main_addr && !main_addr ) {
+
+ /* rename to alt addr */
+
+ HDassert( entry_ptr->addr == entry_ptr->main_addr );
+
+ done = FALSE;
+ old_addr = entry_ptr->addr;
+ new_addr = entry_ptr->alt_addr;
+
+ } else if ( !(entry_ptr->at_main_addr) && main_addr ) {
+
+ /* rename to main addr */
+
+ HDassert( entry_ptr->addr == entry_ptr->alt_addr );
+
+ done = FALSE;
+ old_addr = entry_ptr->addr;
+ new_addr = entry_ptr->main_addr;
+ }
+
+ if ( ! done ) {
+
+ result = H5C_rename_entry(NULL, cache_ptr, &(types[type]),
+ old_addr, new_addr);
+ }
+
+ if ( ! done ) {
+
+ if ( ( result < 0 ) || ( entry_ptr->header.addr != new_addr ) ) {
+
+ pass = FALSE;
+ failure_mssg = "error in H5C_rename_entry().";
+
+ } else {
+
+ entry_ptr->addr = new_addr;
+ entry_ptr->at_main_addr = main_addr;
+ }
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+
+ return;
+
+} /* rename_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: protect_entry()
+ *
+ * Purpose: Protect the entry indicated by the type and index.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/11/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+protect_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx)
+{
+ /* const char * fcn_name = "protect_entry()"; */
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+ H5C_cache_entry_t * cache_entry_ptr;
+
+ if ( pass ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) );
+
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( !(entry_ptr->protected) );
+
+ cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[type]),
+ entry_ptr->addr, NULL, NULL);
+
+ if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
+ ( !(entry_ptr->header.protected) ) ||
+ ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+ pass = FALSE;
+ failure_mssg = "error in H5C_protect().";
+
+ } else {
+
+ entry_ptr->protected = TRUE;
+
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+ }
+
+ return;
+
+} /* protect_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: unprotect_entry()
+ *
+ * Purpose: Unprotect the entry indicated by the type and index.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/12/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define NO_CHANGE -1
+
+static void
+unprotect_entry(H5C_t * cache_ptr,
+ int32_t type,
+ int32_t idx,
+ int dirty,
+ hbool_t deleted)
+{
+ /* const char * fcn_name = "unprotect_entry()"; */
+ herr_t result;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ if ( pass ) {
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+ HDassert( ( 0 <= idx ) && ( idx <= max_indices[type] ) );
+
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ HDassert( entry_ptr->index == idx );
+ HDassert( entry_ptr->type == type );
+ HDassert( entry_ptr == entry_ptr->self );
+ HDassert( entry_ptr->header.protected );
+ HDassert( entry_ptr->protected );
+
+ if ( ( dirty == TRUE ) || ( dirty == FALSE ) ) {
+
+ entry_ptr->header.dirty = dirty;
+ entry_ptr->dirty = dirty;
+ }
+
+ result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[type]),
+ entry_ptr->addr, (void *)entry_ptr, deleted);
+
+ if ( ( result < 0 ) ||
+ ( entry_ptr->header.protected ) ||
+ ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ) {
+
+ pass = FALSE;
+ failure_mssg = "error in H5C_unprotect().";
+
+ }
+ else
+ {
+ entry_ptr->protected = FALSE;
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+ }
+
+ return;
+
+} /* unprotect_entry() */
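+
+/* Usage sketch (illustrative only -- not compiled into the test):
+ * every protect_entry() call must be balanced by an unprotect_entry()
+ * call on the same entry.  Passing NO_CHANGE as the dirty parameter
+ * leaves the entry's dirty flag untouched, while TRUE or FALSE sets it
+ * explicitly just before the unprotect.  The example_* function exists
+ * only for illustration; cache_ptr is assumed to have come from
+ * setup_cache().
+ */
+#if 0
+static void
+example_protect_unprotect(H5C_t * cache_ptr)
+{
+    /* read-only style access -- leave the dirty flag alone */
+    protect_entry(cache_ptr, TINY_ENTRY_TYPE, 0);
+    unprotect_entry(cache_ptr, TINY_ENTRY_TYPE, 0, NO_CHANGE, FALSE);
+
+    /* modify style access -- mark the entry dirty on unprotect */
+    protect_entry(cache_ptr, TINY_ENTRY_TYPE, 1);
+    unprotect_entry(cache_ptr, TINY_ENTRY_TYPE, 1, TRUE, FALSE);
+}
+#endif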
+
+
+/*-------------------------------------------------------------------------
+ * Function: row_major_scan_forward()
+ *
+ * Purpose:     Do a sequence of inserts, protects, unprotects, renames,
+ *              and destroys while scanning through the set of entries.
+ *              If pass is false on entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/12/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+row_major_scan_forward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ hbool_t do_renames,
+ hbool_t rename_to_main_addr,
+ hbool_t do_destroys,
+ int dirty_destroys,
+ int dirty_unprotects)
+{
+ const char * fcn_name = "row_major_scan_forward";
+ int32_t type;
+ int32_t idx;
+
+ if ( verbose )
+ HDfprintf(stdout, "%s(): entering.\n", fcn_name);
+
+ HDassert( lag > 5 );
+
+ type = 0;
+
+ if ( ( pass ) && ( reset_stats ) ) {
+
+ H5C_stats__reset(cache_ptr);
+ }
+
+ while ( ( pass ) && ( type < NUMBER_OF_ENTRY_TYPES ) )
+ {
+ idx = -lag;
+
+ while ( ( pass ) && ( idx <= (max_indices[type] + lag) ) )
+ {
+ if ( ( pass ) && ( do_inserts ) && ( (idx + lag) >= 0 ) &&
+ ( (idx + lag) <= max_indices[type] ) &&
+ ( ((idx + lag) % 2) == 0 ) &&
+ ( ! entry_in_cache(cache_ptr, type, (idx + lag)) ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(i, %d, %d) ", type, (idx + lag));
+
+ insert_entry(cache_ptr, type, (idx + lag), dirty_inserts);
+ }
+
+
+ if ( ( pass ) && ( (idx + lag - 1) >= 0 ) &&
+ ( (idx + lag - 1) <= max_indices[type] ) &&
+ ( ( (idx + lag - 1) % 3 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx + lag - 1));
+
+ protect_entry(cache_ptr, type, (idx + lag - 1));
+ }
+
+ if ( ( pass ) && ( (idx + lag - 2) >= 0 ) &&
+ ( (idx + lag - 2) <= max_indices[type] ) &&
+ ( ( (idx + lag - 2) % 3 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag - 2));
+
+ unprotect_entry(cache_ptr, type, idx+lag-2, NO_CHANGE, FALSE);
+ }
+
+
+ if ( ( pass ) && ( do_renames ) && ( (idx + lag - 2) >= 0 ) &&
+ ( (idx + lag - 2) <= max_indices[type] ) &&
+ ( ( (idx + lag - 2) % 3 ) == 0 ) ) {
+
+ rename_entry(cache_ptr, type, (idx + lag - 2),
+ rename_to_main_addr);
+ }
+
+
+ if ( ( pass ) && ( (idx + lag - 3) >= 0 ) &&
+ ( (idx + lag - 3) <= max_indices[type] ) &&
+ ( ( (idx + lag - 3) % 5 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx + lag - 3));
+
+ protect_entry(cache_ptr, type, (idx + lag - 3));
+ }
+
+ if ( ( pass ) && ( (idx + lag - 5) >= 0 ) &&
+ ( (idx + lag - 5) <= max_indices[type] ) &&
+ ( ( (idx + lag - 5) % 5 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag - 5));
+
+ unprotect_entry(cache_ptr, type, idx+lag-5, NO_CHANGE, FALSE);
+ }
+
+ if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, idx);
+
+ protect_entry(cache_ptr, type, idx);
+ }
+
+
+ if ( ( pass ) && ( (idx - lag + 2) >= 0 ) &&
+ ( (idx - lag + 2) <= max_indices[type] ) &&
+ ( ( (idx - lag + 2) % 7 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag + 2));
+
+ unprotect_entry(cache_ptr, type, idx-lag+2, NO_CHANGE, FALSE);
+ }
+
+ if ( ( pass ) && ( (idx - lag + 1) >= 0 ) &&
+ ( (idx - lag + 1) <= max_indices[type] ) &&
+ ( ( (idx - lag + 1) % 7 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx - lag + 1));
+
+ protect_entry(cache_ptr, type, (idx - lag + 1));
+ }
+
+
+ if ( do_destroys ) {
+
+ if ( ( pass ) && ( (idx - lag) >= 0 ) &&
+ ( ( idx - lag) <= max_indices[type] ) ) {
+
+ switch ( (idx - lag) %4 ) {
+
+ case 0: /* we just did an insert */
+ unprotect_entry(cache_ptr, type, idx - lag,
+ NO_CHANGE, FALSE);
+ break;
+
+ case 1:
+ if ( (entries[type])[idx-lag].dirty ) {
+
+ unprotect_entry(cache_ptr, type, idx - lag,
+ NO_CHANGE, FALSE);
+ } else {
+
+ unprotect_entry(cache_ptr, type, idx - lag,
+ dirty_unprotects, FALSE);
+ }
+ break;
+
+                    case 2: /* we just did an insert */
+ unprotect_entry(cache_ptr, type, idx - lag,
+ NO_CHANGE, TRUE);
+ break;
+
+ case 3:
+ if ( (entries[type])[idx-lag].dirty ) {
+
+ unprotect_entry(cache_ptr, type, idx - lag,
+ NO_CHANGE, TRUE);
+ } else {
+
+ unprotect_entry(cache_ptr, type, idx - lag,
+ dirty_destroys, TRUE);
+ }
+ break;
+
+ default:
+ HDassert(0); /* this can't happen... */
+ break;
+ }
+ }
+
+ } else {
+
+ if ( ( pass ) && ( (idx - lag) >= 0 ) &&
+ ( ( idx - lag) <= max_indices[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag));
+
+ unprotect_entry(cache_ptr, type, idx - lag,
+ dirty_unprotects, FALSE);
+ }
+ }
+
+ if ( verbose )
+ HDfprintf(stdout, "\n");
+
+ idx++;
+ }
+ type++;
+ }
+
+ if ( ( pass ) && ( display_stats ) ) {
+
+ H5C_stats(cache_ptr, "test cache", display_detailed_stats);
+ }
+
+ return;
+
+} /* row_major_scan_forward() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: row_major_scan_backward()
+ *
+ * Purpose:     Do a sequence of inserts, protects, unprotects, renames,
+ *              and destroys while scanning backwards through the set
+ *              of entries.  If pass is false on entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/12/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+row_major_scan_backward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ hbool_t do_renames,
+ hbool_t rename_to_main_addr,
+ hbool_t do_destroys,
+ int dirty_destroys,
+ int dirty_unprotects)
+{
+ const char * fcn_name = "row_major_scan_backward";
+ int32_t type;
+ int32_t idx;
+
+ if ( verbose )
+ HDfprintf(stdout, "%s(): Entering.\n", fcn_name);
+
+ HDassert( lag > 5 );
+
+ type = NUMBER_OF_ENTRY_TYPES - 1;
+
+ if ( ( pass ) && ( reset_stats ) ) {
+
+ H5C_stats__reset(cache_ptr);
+ }
+
+ while ( ( pass ) && ( type >= 0 ) )
+ {
+ idx = max_indices[type] + lag;
+
+ while ( ( pass ) && ( idx >= -lag ) )
+ {
+ if ( ( pass ) && ( do_inserts ) && ( (idx - lag) >= 0 ) &&
+ ( (idx - lag) <= max_indices[type] ) &&
+ ( ((idx - lag) % 2) == 1 ) &&
+ ( ! entry_in_cache(cache_ptr, type, (idx - lag)) ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(i, %d, %d) ", type, (idx - lag));
+
+ insert_entry(cache_ptr, type, (idx - lag), dirty_inserts);
+ }
+
+
+ if ( ( pass ) && ( (idx - lag + 1) >= 0 ) &&
+ ( (idx - lag + 1) <= max_indices[type] ) &&
+ ( ( (idx - lag + 1) % 3 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx - lag + 1));
+
+ protect_entry(cache_ptr, type, (idx - lag + 1));
+ }
+
+ if ( ( pass ) && ( (idx - lag + 2) >= 0 ) &&
+ ( (idx - lag + 2) <= max_indices[type] ) &&
+ ( ( (idx - lag + 2) % 3 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag + 2));
+
+ unprotect_entry(cache_ptr, type, idx-lag+2, NO_CHANGE, FALSE);
+ }
+
+
+ if ( ( pass ) && ( do_renames ) && ( (idx - lag + 2) >= 0 ) &&
+ ( (idx - lag + 2) <= max_indices[type] ) &&
+ ( ( (idx - lag + 2) % 3 ) == 0 ) ) {
+
+ rename_entry(cache_ptr, type, (idx - lag + 2),
+ rename_to_main_addr);
+ }
+
+
+ if ( ( pass ) && ( (idx - lag + 3) >= 0 ) &&
+ ( (idx - lag + 3) <= max_indices[type] ) &&
+ ( ( (idx - lag + 3) % 5 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx - lag + 3));
+
+ protect_entry(cache_ptr, type, (idx - lag + 3));
+ }
+
+ if ( ( pass ) && ( (idx - lag + 5) >= 0 ) &&
+ ( (idx - lag + 5) <= max_indices[type] ) &&
+ ( ( (idx - lag + 5) % 5 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag + 5));
+
+ unprotect_entry(cache_ptr, type, idx-lag+5, NO_CHANGE, FALSE);
+ }
+
+ if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, idx);
+
+ protect_entry(cache_ptr, type, idx);
+ }
+
+
+ if ( ( pass ) && ( (idx + lag - 2) >= 0 ) &&
+ ( (idx + lag - 2) <= max_indices[type] ) &&
+ ( ( (idx + lag - 2) % 7 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag - 2));
+
+ unprotect_entry(cache_ptr, type, idx+lag-2, NO_CHANGE, FALSE);
+ }
+
+ if ( ( pass ) && ( (idx + lag - 1) >= 0 ) &&
+ ( (idx + lag - 1) <= max_indices[type] ) &&
+ ( ( (idx + lag - 1) % 7 ) == 0 ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, (idx + lag - 1));
+
+ protect_entry(cache_ptr, type, (idx + lag - 1));
+ }
+
+
+ if ( do_destroys ) {
+
+ if ( ( pass ) && ( (idx + lag) >= 0 ) &&
+ ( ( idx + lag) <= max_indices[type] ) ) {
+
+ switch ( (idx + lag) %4 ) {
+
+ case 0:
+ if ( (entries[type])[idx+lag].dirty ) {
+
+ unprotect_entry(cache_ptr, type, idx + lag,
+ NO_CHANGE, FALSE);
+ } else {
+
+ unprotect_entry(cache_ptr, type, idx + lag,
+ dirty_unprotects, FALSE);
+ }
+ break;
+
+ case 1: /* we just did an insert */
+ unprotect_entry(cache_ptr, type, idx + lag,
+ NO_CHANGE, FALSE);
+ break;
+
+ case 2:
+ if ( (entries[type])[idx + lag].dirty ) {
+
+ unprotect_entry(cache_ptr, type, idx + lag,
+ NO_CHANGE, TRUE);
+ } else {
+
+ unprotect_entry(cache_ptr, type, idx + lag,
+ dirty_destroys, TRUE);
+ }
+ break;
+
+                    case 3: /* we just did an insert */
+ unprotect_entry(cache_ptr, type, idx + lag,
+ NO_CHANGE, TRUE);
+ break;
+
+ default:
+ HDassert(0); /* this can't happen... */
+ break;
+ }
+ }
+ } else {
+
+ if ( ( pass ) && ( (idx + lag) >= 0 ) &&
+ ( ( idx + lag) <= max_indices[type] ) ) {
+
+ if ( verbose )
+                        HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag));
+
+ unprotect_entry(cache_ptr, type, idx + lag,
+ dirty_unprotects, FALSE);
+ }
+ }
+
+ if ( verbose )
+ HDfprintf(stdout, "\n");
+
+ idx--;
+ }
+ type--;
+ }
+
+ if ( ( pass ) && ( display_stats ) ) {
+
+ H5C_stats(cache_ptr, "test cache", display_detailed_stats);
+ }
+
+ return;
+
+} /* row_major_scan_backward() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: col_major_scan_forward()
+ *
+ * Purpose: Do a sequence of inserts, protects, and unprotects
+ * while scanning through the set of entries. If
+ * pass is false on entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/23/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+col_major_scan_forward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects)
+{
+ const char * fcn_name = "col_major_scan_forward()";
+ int32_t type;
+ int32_t idx;
+
+ if ( verbose )
+ HDfprintf(stdout, "%s: entering.\n", fcn_name);
+
+ HDassert( lag > 5 );
+
+ type = 0;
+
+ if ( ( pass ) && ( reset_stats ) ) {
+
+ H5C_stats__reset(cache_ptr);
+ }
+
+ idx = -lag;
+
+ while ( ( pass ) && ( (idx - lag) <= MAX_ENTRIES ) )
+ {
+ type = 0;
+
+ while ( ( pass ) && ( type < NUMBER_OF_ENTRY_TYPES ) )
+ {
+ if ( ( pass ) && ( do_inserts ) && ( (idx + lag) >= 0 ) &&
+ ( (idx + lag) <= max_indices[type] ) &&
+ ( ((idx + lag) % 3) == 0 ) &&
+ ( ! entry_in_cache(cache_ptr, type, (idx + lag)) ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(i, %d, %d) ", type, (idx + lag));
+
+ insert_entry(cache_ptr, type, (idx + lag), dirty_inserts);
+ }
+
+ if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, idx);
+
+ protect_entry(cache_ptr, type, idx);
+ }
+
+ if ( ( pass ) && ( (idx - lag) >= 0 ) &&
+ ( (idx - lag) <= max_indices[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx - lag));
+
+ unprotect_entry(cache_ptr, type, idx - lag,
+ dirty_unprotects, FALSE);
+ }
+
+ if ( verbose )
+ HDfprintf(stdout, "\n");
+
+ type++;
+ }
+
+ idx++;
+ }
+
+ if ( ( pass ) && ( display_stats ) ) {
+
+ H5C_stats(cache_ptr, "test cache", display_detailed_stats);
+ }
+
+ return;
+
+} /* col_major_scan_forward() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: col_major_scan_backward()
+ *
+ * Purpose: Do a sequence of inserts, protects, and unprotects
+ * while scanning backwards through the set of
+ * entries. If pass is false on entry, do nothing.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/23/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+col_major_scan_backward(H5C_t * cache_ptr,
+ int32_t lag,
+ hbool_t verbose,
+ hbool_t reset_stats,
+ hbool_t display_stats,
+ hbool_t display_detailed_stats,
+ hbool_t do_inserts,
+ hbool_t dirty_inserts,
+ int dirty_unprotects)
+{
+ const char * fcn_name = "col_major_scan_backward()";
+ int mile_stone = 1;
+ int32_t type;
+ int32_t idx;
+
+ if ( verbose )
+ HDfprintf(stdout, "%s: entering.\n", fcn_name);
+
+ HDassert( lag > 5 );
+
+ if ( ( pass ) && ( reset_stats ) ) {
+
+ H5C_stats__reset(cache_ptr);
+ }
+
+ idx = MAX_ENTRIES + lag;
+
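+    /* Mirror image of the forward scan: inserts trail the current index
+     * by `lag' (idx - lag) while unprotects lead it (idx + lag).
+     * Starting at MAX_ENTRIES + lag puts the first in-range inserts at
+     * the top of each entry type's index range.
+     */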
+ if ( verbose ) /* 1 */
+ HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++);
+
+
+ while ( ( pass ) && ( (idx + lag) >= 0 ) )
+ {
+ type = NUMBER_OF_ENTRY_TYPES - 1;
+
+ while ( ( pass ) && ( type >= 0 ) )
+ {
+ if ( ( pass ) && ( do_inserts) && ( (idx - lag) >= 0 ) &&
+ ( (idx - lag) <= max_indices[type] ) &&
+ ( ((idx - lag) % 3) == 0 ) &&
+ ( ! entry_in_cache(cache_ptr, type, (idx - lag)) ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(i, %d, %d) ", type, (idx - lag));
+
+ insert_entry(cache_ptr, type, (idx - lag), dirty_inserts);
+ }
+
+ if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(p, %d, %d) ", type, idx);
+
+ protect_entry(cache_ptr, type, idx);
+ }
+
+ if ( ( pass ) && ( (idx + lag) >= 0 ) &&
+ ( (idx + lag) <= max_indices[type] ) ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag));
+
+ unprotect_entry(cache_ptr, type, idx + lag,
+ dirty_unprotects, FALSE);
+ }
+
+ if ( verbose )
+ HDfprintf(stdout, "\n");
+
+ type--;
+ }
+
+ idx--;
+ }
+
+ if ( verbose ) /* 2 */
+ HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++);
+
+ if ( ( pass ) && ( display_stats ) ) {
+
+ H5C_stats(cache_ptr, "test cache", display_detailed_stats);
+ }
+
+ if ( verbose )
+ HDfprintf(stdout, "%s: exiting.\n", fcn_name);
+
+ return;
+
+} /* col_major_scan_backward() */
+
+
+/**************************************************************************/
+/**************************************************************************/
+/********************************* tests: *********************************/
+/**************************************************************************/
+/**************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_1()
+ *
+ * Purpose:	A basic functional test, with inserts, destroys, and renames
+ *		in the mix, along with repeated protects and unprotects.
+ * All entries are marked as clean.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/16/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_1(void)
+{
+ const char * fcn_name = "smoke_check_1";
+ hbool_t show_progress = FALSE;
+ hbool_t dirty_inserts = FALSE;
+ int dirty_unprotects = FALSE;
+ int dirty_destroys = FALSE;
+ hbool_t display_stats = FALSE;
+ int32_t lag = 10;
+ int mile_stone = 1;
+ H5C_t * cache_ptr = NULL;
+
+ TESTING("smoke check #1 -- all clean, ins, dest, ren, 4/2 MB cache");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ reset_entries();
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ cache_ptr = setup_cache((size_t)(4 * 1024 * 1024),
+ (size_t)(2 * 1024 * 1024));
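+    /* The two sizes passed to setup_cache() appear to be the maximum
+     * cache size (4 MB) and the minimum clean size (2 MB) advertised in
+     * the test banner above.
+     */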
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ TRUE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ FALSE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ TRUE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush and destroy all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ TRUE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ FALSE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ takedown_cache(cache_ptr, display_stats, TRUE);
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ verify_clean();
+ verify_unprotected();
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* smoke_check_1() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_2()
+ *
+ * Purpose: A basic functional test, with inserts, destroys, and
+ * renames in the mix, along with some repeated protects
+ * and unprotects. About half the entries are marked as
+ * dirty.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_2(void)
+{
+ const char * fcn_name = "smoke_check_2";
+ hbool_t show_progress = FALSE;
+ hbool_t dirty_inserts = TRUE;
+ int dirty_unprotects = TRUE;
+ int dirty_destroys = TRUE;
+ hbool_t display_stats = FALSE;
+ int32_t lag = 10;
+ int mile_stone = 1;
+ H5C_t * cache_ptr = NULL;
+
+ TESTING("smoke check #2 -- ~1/2 dirty, ins, dest, ren, 4/2 MB cache");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ reset_entries();
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ cache_ptr = setup_cache((size_t)(4 * 1024 * 1024),
+ (size_t)(2 * 1024 * 1024));
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ TRUE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ FALSE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ TRUE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush and destroy all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ TRUE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ FALSE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ takedown_cache(cache_ptr, display_stats, TRUE);
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ verify_clean();
+ verify_unprotected();
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* smoke_check_2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_3()
+ *
+ * Purpose: A basic functional test on a tiny cache, with inserts,
+ * destroys, and renames in the mix, along with repeated
+ * protects and unprotects. All entries are marked as clean.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/16/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_3(void)
+{
+ const char * fcn_name = "smoke_check_3";
+ hbool_t show_progress = FALSE;
+ hbool_t dirty_inserts = FALSE;
+ int dirty_unprotects = FALSE;
+ int dirty_destroys = FALSE;
+ hbool_t display_stats = FALSE;
+ int32_t lag = 10;
+ int mile_stone = 1;
+ H5C_t * cache_ptr = NULL;
+
+ TESTING("smoke check #3 -- all clean, ins, dest, ren, 2/1 KB cache");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ reset_entries();
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
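+    /* A deliberately tiny cache -- presumably a 2 KB maximum size with a
+     * 1 KB minimum clean size -- so the same scans used in smoke check #1
+     * should now force frequent evictions and flushes.
+     */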
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ TRUE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ FALSE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ TRUE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush and destroy all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ TRUE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ FALSE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ takedown_cache(cache_ptr, display_stats, TRUE);
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ verify_clean();
+ verify_unprotected();
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* smoke_check_3() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: smoke_check_4()
+ *
+ * Purpose: A basic functional test on a tiny cache, with inserts,
+ * destroys, and renames in the mix, along with repeated
+ * protects and unprotects. About half the entries are
+ * marked as dirty.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+smoke_check_4(void)
+{
+ const char * fcn_name = "smoke_check_4";
+ hbool_t show_progress = FALSE;
+ hbool_t dirty_inserts = TRUE;
+ int dirty_unprotects = TRUE;
+ int dirty_destroys = TRUE;
+ hbool_t display_stats = FALSE;
+ int32_t lag = 10;
+ int mile_stone = 1;
+ H5C_t * cache_ptr = NULL;
+
+ TESTING("smoke check #4 -- ~1/2 dirty, ins, dest, ren, 2/1 KB cache");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ reset_entries();
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ TRUE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ FALSE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ TRUE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ dirty_destroys,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush and destroy all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ TRUE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ FALSE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ dirty_inserts,
+ /* dirty_unprotects */ dirty_unprotects);
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ takedown_cache(cache_ptr, display_stats, TRUE);
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ verify_clean();
+ verify_unprotected();
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* smoke_check_4() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: write_permitted_check()
+ *
+ * Purpose: A basic test of the write permitted function. In essence,
+ *		we load the cache up with dirty entries, set
+ * write_permitted to FALSE, and then protect a bunch of
+ * entries. If there are any writes while write_permitted is
+ * FALSE, the test will fail.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+write_permitted_check(void)
+{
+ const char * fcn_name = "write_permitted_check";
+ hbool_t show_progress = FALSE;
+ hbool_t display_stats = FALSE;
+ int32_t lag = 10;
+ int mile_stone = 1;
+ H5C_t * cache_ptr = NULL;
+
+ TESTING("write permitted check -- 1/0 MB cache");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ reset_entries();
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ cache_ptr = setup_cache((size_t)(1 * 1024 * 1024),
+ (size_t)(0));
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ TRUE,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ TRUE,
+ /* dirty_destroys */ TRUE,
+ /* dirty_unprotects */ TRUE);
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ write_permitted = FALSE;
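+    /* from here until write_permitted is restored, any attempt by the
+     * cache to write back a dirty entry should be reported as a test
+     * failure.
+     */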
+
+ row_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ FALSE,
+ /* dirty_inserts */ FALSE,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ TRUE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ FALSE,
+ /* dirty_unprotects */ NO_CHANGE);
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ write_permitted = TRUE;
+
+ row_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ TRUE,
+ /* do_renames */ TRUE,
+ /* rename_to_main_addr */ FALSE,
+ /* do_destroys */ FALSE,
+ /* dirty_destroys */ TRUE,
+ /* dirty_unprotects */ TRUE);
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ /* flush and destroy all entries in the cache: */
+
+ flush_cache(/* cache_ptr */ cache_ptr,
+ /* destroy_entries */ TRUE,
+ /* dump_stats */ FALSE,
+ /* dump_detailed_stats */ FALSE);
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ col_major_scan_forward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ TRUE,
+ /* dirty_inserts */ TRUE,
+ /* dirty_unprotects */ TRUE);
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ write_permitted = FALSE;
+
+ col_major_scan_backward(/* cache_ptr */ cache_ptr,
+ /* lag */ lag,
+ /* verbose */ FALSE,
+ /* reset_stats */ TRUE,
+ /* display_stats */ display_stats,
+ /* display_detailed_stats */ TRUE,
+ /* do_inserts */ FALSE,
+ /* dirty_inserts */ FALSE,
+ /* dirty_unprotects */ NO_CHANGE);
+
+ write_permitted = TRUE;
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ takedown_cache(cache_ptr, display_stats, TRUE);
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
+ fcn_name, mile_stone++, (int)pass);
+
+ verify_clean();
+ verify_unprotected();
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* write_permitted_check() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_flush_protected_err()
+ *
+ * Purpose: Verify that an attempt to flush the cache when it contains
+ * a protected entry will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_flush_protected_err(void)
+{
+ const char * fcn_name = "check_flush_protected_err";
+ H5C_t * cache_ptr = NULL;
+
+ TESTING("flush cache with protected entry error");
+
+ pass = TRUE;
+
+ /* allocate a cache, protect an entry, and try to flush. This
+ * should fail. Unprotect the entry and flush again -- should
+ * succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry(cache_ptr, 0, 0);
+
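+        /* with entry (0,0) protected, the flush below must fail */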
+ if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, 0) >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "flush succeeded on cache with protected entry.\n";
+
+ } else {
+
+ unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE);
+
+ if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, 0) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "flush failed after unprotect.\n";
+
+ } else {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* check_flush_protected_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_destroy_protected_err()
+ *
+ * Purpose: Verify that an attempt to destroy the cache when it contains
+ * a protected entry will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_destroy_protected_err(void)
+{
+ const char * fcn_name = "check_destroy_protected_err";
+ H5C_t * cache_ptr = NULL;
+
+ TESTING("destroy cache with protected entry error");
+
+ pass = TRUE;
+
+    /* allocate a cache, protect an entry, and try to destroy the cache.
+     * This should fail.  Unprotect the entry and destroy again -- should
+     * succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry(cache_ptr, 0, 0);
+
+ if ( H5C_dest(NULL, -1, -1, cache_ptr) >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "destroy succeeded on cache with protected entry.\n";
+
+ } else {
+
+ unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE);
+
+ if ( H5C_dest(NULL, -1, -1, cache_ptr) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "destroy failed after unprotect.\n";
+
+ }
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* check_destroy_protected_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_duplicate_insert_err()
+ *
+ * Purpose:	Verify that an attempt to insert an entry that is
+ *		already in the cache will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_duplicate_insert_err(void)
+{
+ const char * fcn_name = "check_duplicate_insert_err";
+ herr_t result;
+ H5C_t * cache_ptr = NULL;
+ test_entry_t * base_addr;
+ test_entry_t * entry_ptr;
+
+ TESTING("duplicate entry insertion error");
+
+ pass = TRUE;
+
+    /* allocate a cache, protect an entry (thus loading it into the cache),
+     * and then try to insert that same entry.  This should fail.  Unprotect
+     * the entry and destroy the cache -- should succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry(cache_ptr, 0, 0);
+
+ if ( pass ) {
+
+ base_addr = entries[0];
+ entry_ptr = &(base_addr[0]);
+
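+            /* entry (0,0) is already resident in the cache (and
+             * protected), so this direct call to H5C_insert_entry()
+             * is expected to fail.
+             */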
+ result = H5C_insert_entry(NULL, -1, -1, cache_ptr,
+ &(types[0]), entry_ptr->addr,
+ (void *)entry_ptr);
+
+ if ( result >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "insert of duplicate entry succeeded.\n";
+
+ } else {
+
+ unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE);
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* check_duplicate_insert_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_rename_err()
+ *
+ * Purpose: Verify that an attempt to rename an entry to the address
+ * of an existing entry will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_rename_err(void)
+{
+ const char * fcn_name = "check_rename_err()";
+ herr_t result;
+ H5C_t * cache_ptr = NULL;
+ test_entry_t * entry_0_0_ptr;
+ test_entry_t * entry_0_1_ptr;
+ test_entry_t * entry_1_0_ptr;
+
+ TESTING("rename to existing entry errors");
+
+ pass = TRUE;
+
+ /* allocate a cache, and insert several entries. Try to rename
+ * entries to other entries resident in the cache. This should
+ * fail. Destroy the cache -- should succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ insert_entry(cache_ptr, 0, 0, TRUE);
+ insert_entry(cache_ptr, 0, 1, TRUE);
+ insert_entry(cache_ptr, 1, 0, TRUE);
+
+ entry_0_0_ptr = &((entries[0])[0]);
+ entry_0_1_ptr = &((entries[0])[1]);
+ entry_1_0_ptr = &((entries[1])[0]);
+ }
+
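+    /* both rename attempts below target addresses already occupied by
+     * other cache resident entries, so each call to H5C_rename_entry()
+     * is expected to fail.
+     */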
+ if ( pass ) {
+
+ result = H5C_rename_entry(NULL, cache_ptr, &(types[0]),
+ entry_0_0_ptr->addr, entry_0_1_ptr->addr);
+
+ if ( result >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "rename to addr of same type succeeded.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ result = H5C_rename_entry(NULL, cache_ptr, &(types[0]),
+ entry_0_0_ptr->addr, entry_1_0_ptr->addr);
+
+ if ( result >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "rename to addr of different type succeeded.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* check_rename_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_double_protect_err()
+ *
+ * Purpose: Verify that an attempt to protect an entry that is already
+ * protected will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_double_protect_err(void)
+{
+ const char * fcn_name = "check_double_protect_err()";
+ H5C_t * cache_ptr = NULL;
+ test_entry_t * entry_ptr;
+ H5C_cache_entry_t * cache_entry_ptr;
+
+ TESTING("protect a protected entry error");
+
+ pass = TRUE;
+
+ /* allocate a cache, protect an entry, and then try to protect
+ * the entry again. This should fail. Unprotect the entry and
+ * destroy the cache -- should succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry(cache_ptr, 0, 0);
+
+ entry_ptr = &((entries[0])[0]);
+ }
+
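+    /* call H5C_protect() directly rather than through protect_entry(),
+     * as a NULL return (i.e. a failed protect) is the expected, passing
+     * outcome here.
+     */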
+ if ( pass ) {
+
+ cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[0]),
+ entry_ptr->addr, NULL, NULL);
+
+ if ( cache_entry_ptr != NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "attempt to protect a protected entry succeeded.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ unprotect_entry(cache_ptr, 0, 0, FALSE, FALSE);
+ }
+
+ if ( pass ) {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* check_double_protect_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_double_unprotect_err()
+ *
+ * Purpose: Verify that an attempt to unprotect an entry that is already
+ * unprotected will generate an error.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_double_unprotect_err(void)
+{
+ const char * fcn_name = "check_double_unprotect_err()";
+ herr_t result;
+ H5C_t * cache_ptr = NULL;
+ test_entry_t * entry_ptr;
+
+ TESTING("unprotect an unprotected entry error");
+
+ pass = TRUE;
+
+ /* allocate a cache, protect an entry, unprotect it, and then try to
+ * unprotect the entry again. This should fail. Destroy the cache
+ * -- should succeed.
+ */
+
+ if ( pass ) {
+
+ reset_entries();
+
+ cache_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ protect_entry(cache_ptr, 0, 0);
+
+ unprotect_entry(cache_ptr, 0, 0, FALSE, FALSE);
+
+ entry_ptr = &((entries[0])[0]);
+ }
+
+ if ( pass ) {
+
+ result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]),
+ entry_ptr->addr, (void *)entry_ptr, FALSE);
+
+        if ( result >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "attempt to unprotect an unprotected entry succeeded.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ fcn_name, failure_mssg);
+
+} /* check_double_unprotect_err() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Run tests on the cache code contained in H5C.c
+ *
+ * Return:	0
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
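+    /* initialize the HDF5 library before exercising the cache code */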
+ H5open();
+#if 0
+ smoke_check_1();
+ smoke_check_2();
+#endif
+ smoke_check_3();
+ smoke_check_4();
+#if 0
+ write_permitted_check();
+#endif
+ check_flush_protected_err();
+ check_destroy_protected_err();
+ check_duplicate_insert_err();
+ check_rename_err();
+ check_double_protect_err();
+ check_double_unprotect_err();
+
+ return(0);
+
+} /* main() */